| code (string, 82–54.1k chars) | code_codestyle (int64, 0–699) | style_context (string, 111–35.6k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
def matching_min_vertex_cover(graph: dict) -> set:
    """
    Approximate a minimum vertex cover via matching: taking both endpoints of
    each chosen edge yields a cover at most twice the optimal size.
    """
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add its endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """
    Return a set of couples that represents all of the edges.
    """
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 682 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
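# Usage sketch (assuming this file sits at transformers/models/wav2vec2/__init__.py):
# `import transformers` stays cheap because nothing above runs eagerly; accessing
# `transformers.Wav2Vec2Config` triggers the real import of configuration_wav2vec2
# through _LazyModule on first attribute access.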
| 660 | 0 |
"""simple docstring"""
import requests
_UpperCamelCase = '''YOUR API KEY'''
def _A( lowerCAmelCase , lowerCAmelCase = giphy_api_key ):
A__ : Tuple = """+""".join(query.split() )
A__ : Tuple = F'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
A__ : Any = requests.get(A_ ).json()["""data"""]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 363 | '''simple docstring'''
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name):
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
| 660 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 613 | '''simple docstring'''
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """
    Constructs a feature extractor that turns HTML strings into lists of nodes
    and their corresponding xpaths.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
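# Usage sketch (using the restored class name; output shown for this tiny input):
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#   encoding["nodes"]   # [['Hello world']]
#   encoding["xpaths"]  # [['/html/body/p']]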
| 660 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a :Optional[Any] = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self) -> None:
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self) -> None:
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 680 | '''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers representing two points in the same
    n-dimensional space.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
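# Usage sketch:
#   manhattan_distance([1, 1], [2, 2])            # 2.0, i.e. |1-2| + |1-2|
#   manhattan_distance_one_liner([1, 1], [2, 2])  # 2.0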
| 660 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCAmelCase_ : Tuple = xmod_sent_encoder.embed_tokens.weight
UpperCAmelCase_ : Tuple = xmod_sent_encoder.embed_positions.weight
UpperCAmelCase_ : Any = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
UpperCAmelCase_ : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
UpperCAmelCase_ : Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCAmelCase_ : Dict = model.roberta.encoder.layer[i]
UpperCAmelCase_ : Tuple = xmod_sent_encoder.layers[i]
# self attention
UpperCAmelCase_ : Union[str, Any] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
UpperCAmelCase_ : Optional[Any] = xmod_layer.self_attn.q_proj.weight
UpperCAmelCase_ : Tuple = xmod_layer.self_attn.q_proj.bias
UpperCAmelCase_ : List[str] = xmod_layer.self_attn.k_proj.weight
UpperCAmelCase_ : int = xmod_layer.self_attn.k_proj.bias
UpperCAmelCase_ : Optional[int] = xmod_layer.self_attn.v_proj.weight
UpperCAmelCase_ : Any = xmod_layer.self_attn.v_proj.bias
# self-attention output
UpperCAmelCase_ : List[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
UpperCAmelCase_ : Tuple = xmod_layer.self_attn.out_proj.weight
UpperCAmelCase_ : List[Any] = xmod_layer.self_attn.out_proj.bias
UpperCAmelCase_ : Optional[Any] = xmod_layer.self_attn_layer_norm.weight
UpperCAmelCase_ : int = xmod_layer.self_attn_layer_norm.bias
# intermediate
UpperCAmelCase_ : Dict = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
UpperCAmelCase_ : Optional[Any] = xmod_layer.fca.weight
UpperCAmelCase_ : int = xmod_layer.fca.bias
# output
UpperCAmelCase_ : str = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
UpperCAmelCase_ : Dict = xmod_layer.fca.weight
UpperCAmelCase_ : Optional[int] = xmod_layer.fca.bias
UpperCAmelCase_ : int = xmod_layer.final_layer_norm.weight
UpperCAmelCase_ : Any = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
UpperCAmelCase_ : Optional[Any] = xmod_layer.adapter_layer_norm.weight
UpperCAmelCase_ : Any = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
UpperCAmelCase_ : List[Any] = bert_output.adapter_modules[lang_code]
UpperCAmelCase_ : List[str] = xmod_layer.adapter_modules[lang_code]
UpperCAmelCase_ : Optional[Any] = from_adapter.fca.weight
UpperCAmelCase_ : Optional[Any] = from_adapter.fca.bias
UpperCAmelCase_ : Union[str, Any] = from_adapter.fca.weight
UpperCAmelCase_ : Tuple = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
UpperCAmelCase_ : int = xmod_sent_encoder.layer_norm.weight
UpperCAmelCase_ : Dict = xmod_sent_encoder.layer_norm.bias
if classification_head:
UpperCAmelCase_ : int = xmod.model.classification_heads["""mnli"""].dense.weight
UpperCAmelCase_ : Tuple = xmod.model.classification_heads["""mnli"""].dense.bias
UpperCAmelCase_ : Tuple = xmod.model.classification_heads["""mnli"""].out_proj.weight
UpperCAmelCase_ : Any = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
UpperCAmelCase_ : Tuple = xmod.model.encoder.lm_head.dense.weight
UpperCAmelCase_ : List[Any] = xmod.model.encoder.lm_head.dense.bias
UpperCAmelCase_ : str = xmod.model.encoder.lm_head.layer_norm.weight
UpperCAmelCase_ : Dict = xmod.model.encoder.lm_head.layer_norm.bias
UpperCAmelCase_ : List[str] = xmod.model.encoder.lm_head.weight
UpperCAmelCase_ : Tuple = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
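# Example invocation (paths are hypothetical):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path ./xmod.base/model.pt \
#       --pytorch_dump_folder_path ./hf-xmod-base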
| 470 | '''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
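# A quick numeric check of the helper above (a sketch, not part of the original
# script): a uniform distribution over four outcomes has the maximal entropy
# ln(4) ≈ 1.3863.
#   entropy(torch.tensor([[0.25, 0.25, 0.25, 0.25]]))  # -> tensor([1.3863])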
def print_2d_tensor(tensor):
    """Print a 2D tensor"""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)

    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
| 660 | 0 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the selected choice"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
| 126 | '''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
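# Example invocation (paths are hypothetical):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text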
| 660 | 0 |
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
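# Hypothetical usage sketch (the file name is illustrative; requires the torch
# and vision extras):
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   print(tool(image=Image.open("photo.png"), question="What is on the table?"))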
| 269 | '''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 660 | 0 |
'''simple docstring'''
def join(separator: str, separated: list) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator

    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
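# Usage sketch:
#   join(" ", ["You", "are", "amazing!"])  # 'You are amazing!'
#   join(",", ["a", "b", "c"])             # 'a,b,c'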
| 533 | '''simple docstring'''
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
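# Shape sketch for the chunking above (illustrative numbers): a fused 1x1-conv
# qkv weight of shape (3*C, C, 1, 1) chunks along dim 0 into three (C, C, 1, 1)
# tensors; squeeze(-1).squeeze(-1) then yields the (C, C) matrices that the
# diffusers to_q/to_k/to_v linear layers expect.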
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
__snake_case : List[str] = parser.parse_args()
__snake_case : Any = strabool(args.class_cond)
__snake_case : List[str] = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
__snake_case : Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__snake_case : List[str] = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
__snake_case : Optional[Any] = None
__snake_case : Optional[int] = con_pt_to_diffuser(args.unet_path, unet_config)
__snake_case : str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__snake_case : Tuple = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__snake_case : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
__snake_case : Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config)
__snake_case : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 660 | 0 |
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
__SCREAMING_SNAKE_CASE =re.compile(r"""^(?P<major>\d+)""" r"""\.(?P<minor>\d+)""" r"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class __magic_name__ :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = 42
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
def _A ( self: Optional[int] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _str_to_version_tuple(self.version_str )
def __repr__( self: Tuple ):
return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def _A ( self: Union[str, Any] ):
return self.major, self.minor, self.patch
def _A ( self: Union[str, Any] , _lowerCamelCase: List[str] ):
if isinstance(other , str ):
    return Version(other )
elif isinstance(other , Version ):
    return other
raise TypeError(f"{other} (type {type(other )}) cannot be compared to version." )
def __eq__( self: Optional[int] , _lowerCamelCase: Optional[Any] ):
try:
SCREAMING_SNAKE_CASE_ = self._validate_operand(UpperCamelCase__ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self: Any , _lowerCamelCase: Any ):
SCREAMING_SNAKE_CASE_ = self._validate_operand(UpperCamelCase__ )
return self.tuple < other.tuple
def __hash__( self: Any ):
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def _A ( cls: List[Any] , _lowerCamelCase: Optional[Any] ):
SCREAMING_SNAKE_CASE_ = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def _A ( self: Union[str, Any] ):
return self.version_str
def _str_to_version_tuple (version_str ):
    res = _VERSION_REG.match(version_str )
    if not res:
        raise ValueError(F"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." )
    return tuple(int(v ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def _version_tuple_to_str (version_tuple ):
    return ".".join(str(v ) for v in version_tuple )
| 234 | '''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
__snake_case : Any = _symbol_database.Default()
__snake_case : Dict = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
__snake_case : Union[str, Any] = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
__snake_case : Any = None
__snake_case : Dict = B'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
__snake_case : Union[str, Any] = 45
__snake_case : str = 15_81
__snake_case : Optional[int] = 15_17
__snake_case : Optional[Any] = 15_70
__snake_case : Union[str, Any] = 15_84
__snake_case : Any = 17_93
__snake_case : Optional[int] = 17_95
__snake_case : Tuple = 19_16
__snake_case : int = 18_64
__snake_case : Any = 19_05
__snake_case : Optional[int] = 19_19
__snake_case : str = 24_29
__snake_case : Tuple = 22_08
__snake_case : str = 24_18
__snake_case : Tuple = 23_23
__snake_case : Optional[int] = 24_07
# @@protoc_insertion_point(module_scope)
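A hedged sketch of consuming the generated module; the message classes are injected into this module's globals by the builder calls above, and ParseFromString is the standard protobuf message API (the model file path is a placeholder).

from sentencepiece_model_pb2 import ModelProto

proto = ModelProto()
with open("spiece.model", "rb") as f:  # path is an assumption
    proto.ParseFromString(f.read())
print(proto.trainer_spec.vocab_size, len(proto.pieces))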
| 660 | 0 |
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
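For context, a minimal sketch of what a shim like this typically does — not diffusers' actual implementation — so the intent of the call above is concrete: emit a FutureWarning that points callers at the new import path while the old names keep working.

import warnings

def _deprecate_sketch(name, removal_version, message, standard_warn=True, stacklevel=2):
    # With standard_warn=False (as above), only the custom message is emitted.
    prefix = f"`{name}` is deprecated and will be removed in version {removal_version}. " if standard_warn else ""
    warnings.warn(prefix + message, FutureWarning, stacklevel=stacklevel)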
| 406 | '''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase_ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("xlm-roberta-base" )
UpperCAmelCase_ = "The dog is cute and lives in the garden house"
UpperCAmelCase_ = jnp.array([tokenizer.encode(UpperCamelCase__ )] )
UpperCAmelCase_ = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
UpperCAmelCase_ = model(UpperCamelCase__ )["last_hidden_state"]
self.assertEqual(output.shape , UpperCamelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase__ , atol=1e-3 ) )
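The tolerance check above in isolation: an absolute tolerance of 1e-3 accepts small numerical drift between frameworks and hardware while still catching real regressions (values below are illustrative).

import numpy as np

expected = np.array([-0.0101, 0.1218, -0.0803])
actual = expected + 5e-4  # plausible framework/version noise
assert np.allclose(actual, expected, atol=1e-3)
assert not np.allclose(actual + 0.01, expected, atol=1e-3)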
| 660 | 0 |
"""simple docstring"""
from math import loga
def _UpperCamelCase ( a ) -> int:
    """Return the zero-based index of the lowest set bit of ``a``."""
    if not isinstance(a , int ):
        raise TypeError("Input value must be an 'int' type" )
    if a < 0:
        raise ValueError("Input value must be a positive integer" )
    return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
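The same index can be computed without floating point; a & -a isolates the lowest set bit and int.bit_length gives its position directly.

def lowest_set_bit_index(a: int) -> int:
    # 36 == 0b100100 -> lowest set bit is 0b100 -> index 2
    if not isinstance(a, int):
        raise TypeError("Input value must be an 'int' type")
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if a == 0 else (a & -a).bit_length() - 1

assert lowest_set_bit_index(36) == 2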
| 77 | '''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCamelCase__ ( A_ , A_ , A_ ):
# Construct model
if gpta_config_file == "":
UpperCAmelCase_ = GPTaConfig()
else:
UpperCAmelCase_ = GPTaConfig.from_json_file(A_ )
UpperCAmelCase_ = GPTaModel(A_ )
# Load weights from numpy
load_tf_weights_in_gpta(A_ , A_ , A_ )
# Save pytorch-model
UpperCAmelCase_ = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
UpperCAmelCase_ = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , A_ )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(A_ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
__snake_case : Dict = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
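An illustrative invocation (script name and all paths are placeholders); afterwards the dump folder holds pytorch_model.bin (WEIGHTS_NAME) and config.json (CONFIG_NAME), so it loads with the usual from_pretrained call.

# python convert_gpt2_checkpoint.py \
#     --gpt2_checkpoint_path /ckpts/model.ckpt \
#     --pytorch_dump_folder_path /out/gpt2-pt \
#     --gpt2_config_file /ckpts/config.json
#
# from transformers import GPT2Model
# model = GPT2Model.from_pretrained("/out/gpt2-pt")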
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase__ : List[Any] , ) -> List[str]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = 1_3
__SCREAMING_SNAKE_CASE = 7
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = 9_9
__SCREAMING_SNAKE_CASE = 3_2
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = 3_7
__SCREAMING_SNAKE_CASE = "gelu"
__SCREAMING_SNAKE_CASE = 0.1
__SCREAMING_SNAKE_CASE = 0.1
__SCREAMING_SNAKE_CASE = 5_1_2
__SCREAMING_SNAKE_CASE = 1_6
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 0.02
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = None
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = TFDistilBertModel(config=UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask}
__SCREAMING_SNAKE_CASE = model(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = [input_ids, input_mask]
__SCREAMING_SNAKE_CASE = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = TFDistilBertForMaskedLM(config=UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask}
__SCREAMING_SNAKE_CASE = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any ) -> str:
__SCREAMING_SNAKE_CASE = TFDistilBertForQuestionAnswering(config=UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
__SCREAMING_SNAKE_CASE = model(UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = TFDistilBertForSequenceClassification(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask}
__SCREAMING_SNAKE_CASE = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int:
__SCREAMING_SNAKE_CASE = self.num_choices
__SCREAMING_SNAKE_CASE = TFDistilBertForMultipleChoice(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
__SCREAMING_SNAKE_CASE = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int ) -> str:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = TFDistilBertForTokenClassification(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask}
__SCREAMING_SNAKE_CASE = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( _A , _A , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
snake_case__ : Optional[Any] = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ : int = False
snake_case__ : Tuple = False
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = TFDistilBertModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCamelCase__ , dim=3_7 )
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCamelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCamelCase__ )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCamelCase__ )
def UpperCAmelCase_ ( self : int ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCamelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCamelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCamelCase__ )
@slow
def UpperCAmelCase_ ( self : Tuple ) -> int:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__SCREAMING_SNAKE_CASE = TFDistilBertModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_tf
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self : int ) -> int:
__SCREAMING_SNAKE_CASE = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
__SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
__SCREAMING_SNAKE_CASE = model(UpperCamelCase__ )[0]
__SCREAMING_SNAKE_CASE = [1, 6, 7_6_8]
self.assertEqual(output.shape , UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 )
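The tester above exercises a tiny randomly initialized configuration rather than a pretrained checkpoint; the same idea in isolation, with hyperparameters mirroring the tester's defaults:

import tensorflow as tf
from transformers import DistilBertConfig, TFDistilBertModel

config = DistilBertConfig(vocab_size=99, dim=32, n_layers=2, n_heads=4, hidden_dim=37)
model = TFDistilBertModel(config)  # random weights, fast enough for CI
out = model(tf.constant([[1, 2, 3]]))
print(out.last_hidden_state.shape)  # (1, 3, 32)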
| 682 | '''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def lowerCamelCase__ ( A_ ):
def decorator(A_ ):
UpperCAmelCase_ = getattr(A_ , "handle_key" , [] )
handle += [key]
setattr(A_ , "handle_key" , A_ )
return func
return decorator
def lowerCamelCase__ ( *A_ ):
def decorator(A_ ):
UpperCAmelCase_ = getattr(A_ , "handle_key" , [] )
handle += keys
setattr(A_ , "handle_key" , A_ )
return func
return decorator
class lowercase_ ( _A ):
def __new__( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = super().__new__(cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not hasattr(UpperCamelCase__ , "key_handler" ):
setattr(UpperCamelCase__ , "key_handler" , {} )
setattr(UpperCamelCase__ , "handle_input" , KeyHandler.handle_input )
for value in attrs.values():
UpperCAmelCase_ = getattr(UpperCamelCase__ , "handle_key" , [] )
for key in handled_keys:
UpperCAmelCase_ = value
return new_cls
@staticmethod
def lowerCamelCase_ ( cls ) -> str:
"""simple docstring"""
UpperCAmelCase_ = get_character()
if char != KEYMAP["undefined"]:
UpperCAmelCase_ = ord(UpperCamelCase__ )
UpperCAmelCase_ = cls.key_handler.get(UpperCamelCase__ )
if handler:
UpperCAmelCase_ = char
return handler(cls )
else:
return None
def lowerCamelCase__ ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
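A self-contained miniature of the registration pattern above (with independent names, since the two decorators share an obfuscated name here): methods tag themselves through a handle_key attribute, and the metaclass folds the tags into a per-class dispatch table.

def on_key(*keys):
    def decorator(func):
        func.handle_key = getattr(func, "handle_key", []) + list(keys)
        return func
    return decorator

class KeyDispatch(type):
    def __new__(mcls, name, bases, attrs):
        cls = super().__new__(mcls, name, bases, attrs)
        cls.key_handler = {}
        for value in attrs.values():
            for key in getattr(value, "handle_key", []):
                cls.key_handler[key] = value
        return cls

class Menu(metaclass=KeyDispatch):
    @on_key("j", "down")
    def move_down(self):
        return "down"

print(sorted(Menu.key_handler))  # ['down', 'j']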
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_UpperCamelCase = 0
_UpperCamelCase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_UpperCamelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_UpperCamelCase = tuple[int, int]
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
'''simple docstring'''
A__ : List[Any] = pos_x
A__ : Dict = pos_y
A__ : List[Any] = (pos_y, pos_x)
A__ : List[Any] = goal_x
A__ : Tuple = goal_y
A__ : str = g_cost
A__ : Dict = parent
A__ : Optional[Any] = self.calculate_heuristic()
A__ : Any = self.g_cost + self.h_cost
def lowerCamelCase ( self ):
'''simple docstring'''
A__ : List[str] = self.pos_x - self.goal_x
A__ : int = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(UpperCamelCase__ ) + abs(UpperCamelCase__ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , snake_case_ ):
'''simple docstring'''
return self.f_cost < other.f_cost
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ ):
'''simple docstring'''
A__ : Any = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCamelCase__ )
A__ : Union[str, Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , UpperCamelCase__ )
A__ : List[Any] = [self.start]
A__ : int = []
A__ : Tuple = False
def lowerCamelCase ( self ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
A__ : Any = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(UpperCamelCase__ )
self.closed_nodes.append(UpperCamelCase__ )
A__ : List[str] = self.get_successors(UpperCamelCase__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(UpperCamelCase__ )
else:
# retrieve the best current path
A__ : int = self.open_nodes.pop(self.open_nodes.index(UpperCamelCase__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(UpperCamelCase__ )
else:
self.open_nodes.append(UpperCamelCase__ )
return [self.start.pos]
def lowerCamelCase ( self , snake_case_ ):
'''simple docstring'''
A__ : str = []
for action in delta:
A__ : List[str] = parent.pos_x + action[1]
A__ : int = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCamelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
UpperCamelCase__ , UpperCamelCase__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCamelCase__ , ) )
return successors
def lowerCamelCase ( self , snake_case_ ):
'''simple docstring'''
A__ : Any = node
A__ : List[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
A__ : List[Any] = current_node.parent
path.reverse()
return path
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ ):
'''simple docstring'''
A__ : str = AStar(UpperCamelCase__ , UpperCamelCase__ )
A__ : int = AStar(UpperCamelCase__ , UpperCamelCase__ )
A__ : List[Any] = False
def lowerCamelCase ( self ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
A__ : Tuple = self.fwd_astar.open_nodes.pop(0 )
A__ : List[str] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
UpperCamelCase__ , UpperCamelCase__ )
self.fwd_astar.closed_nodes.append(UpperCamelCase__ )
self.bwd_astar.closed_nodes.append(UpperCamelCase__ )
A__ : List[str] = current_bwd_node
A__ : Any = current_fwd_node
A__ : str = {
self.fwd_astar: self.fwd_astar.get_successors(UpperCamelCase__ ),
self.bwd_astar: self.bwd_astar.get_successors(UpperCamelCase__ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(UpperCamelCase__ )
else:
# retrieve the best current path
A__ : Dict = astar.open_nodes.pop(
astar.open_nodes.index(UpperCamelCase__ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(UpperCamelCase__ )
else:
astar.open_nodes.append(UpperCamelCase__ )
return [self.fwd_astar.start.pos]
def lowerCamelCase ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
A__ : Dict = self.fwd_astar.retrace_path(UpperCamelCase__ )
A__ : List[str] = self.bwd_astar.retrace_path(UpperCamelCase__ )
bwd_path.pop()
bwd_path.reverse()
A__ : Any = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_UpperCamelCase = (0, 0)
_UpperCamelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_UpperCamelCase = time.time()
_UpperCamelCase = AStar(init, goal)
_UpperCamelCase = a_star.search()
_UpperCamelCase = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
_UpperCamelCase = time.time()
_UpperCamelCase = BidirectionalAStar(init, goal)
_UpperCamelCase = bd_a_star.search()
_UpperCamelCase = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
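The two heuristics above on a concrete pair of cells, to make the HEURISTIC switch tangible (coordinates are illustrative):

from math import sqrt

dx, dy = 3 - 0, 4 - 0
manhattan = abs(dx) + abs(dy)    # 7, used when HEURISTIC == 1
euclidean = sqrt(dx**2 + dy**2)  # 5.0, used otherwise
print(manhattan, euclidean)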
| 363 | '''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__snake_case : Optional[Any] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowercase_ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=1_6 , UpperCamelCase__=1_3 , UpperCamelCase__=7 , UpperCamelCase__=1_4 , UpperCamelCase__=1_0 , UpperCamelCase__=1_9 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=True , UpperCamelCase__=1_6 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=[1, 2, 3, 4, 5] , UpperCamelCase__=2_5 , UpperCamelCase__=5 , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = d_model
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = prediction_length
UpperCAmelCase_ = context_length
UpperCAmelCase_ = cardinality
UpperCAmelCase_ = num_time_features
UpperCAmelCase_ = lags_sequence
UpperCAmelCase_ = embedding_dimension
UpperCAmelCase_ = is_training
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = context_length
UpperCAmelCase_ = prediction_length + label_length
UpperCAmelCase_ = label_length
UpperCAmelCase_ = moving_average
UpperCAmelCase_ = autocorrelation_factor
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = config.context_length + max(config.lags_sequence )
UpperCAmelCase_ = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
UpperCAmelCase_ = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
UpperCAmelCase_ = floats_tensor([self.batch_size, _past_length] )
UpperCAmelCase_ = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
UpperCAmelCase_ = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
UpperCAmelCase_ = floats_tensor([self.batch_size, config.prediction_length] )
UpperCAmelCase_ = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.prepare_autoformer_inputs_dict(UpperCamelCase__ )
return config, inputs_dict
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel(config=UpperCamelCase__ ).to(UpperCamelCase__ ).eval()
UpperCAmelCase_ = model(**UpperCamelCase__ )
UpperCAmelCase_ = outputs.encoder_last_hidden_state
UpperCAmelCase_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = model.get_encoder()
encoder.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = AutoformerEncoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = model.create_network_inputs(**UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
UpperCAmelCase_ = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
UpperCAmelCase_ = encoder(inputs_embeds=UpperCamelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
UpperCAmelCase_ = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
UpperCAmelCase_ = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
UpperCAmelCase_ = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
UpperCAmelCase_ = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = model.get_decoder()
decoder.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = AutoformerDecoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
UpperCAmelCase_ = decoder(
trend=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowercase_ ( _A , _A , unittest.TestCase ):
a_ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a_ = (AutoformerForPrediction,) if is_torch_available() else ()
a_ = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = model_class.from_pretrained(UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = inspect.signature(getattr(UpperCamelCase__ , "forward" ) )
# The main input is the name of the argument after `self`
UpperCAmelCase_ = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
UpperCAmelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "d_model" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "num_attention_heads" , UpperCamelCase__ )
UpperCAmelCase_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
UpperCAmelCase_ = len(UpperCamelCase__ )
UpperCAmelCase_ = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# decoder attentions
UpperCAmelCase_ = outputs.decoder_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
UpperCAmelCase_ = outputs.cross_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def lowerCamelCase__ ( A_="train-batch.pt" ):
UpperCAmelCase_ = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=A_ , repo_type="dataset" )
UpperCAmelCase_ = torch.load(A_ , map_location=torch_device )
return batch
@require_torch
@slow
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch()
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
UpperCAmelCase_ = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
UpperCAmelCase_ = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
UpperCAmelCase_ = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCamelCase__ )
UpperCAmelCase_ = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase__ , rtol=1e-1 ) )
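The aggregation step at the end of the generation test, in isolation: generate returns one trajectory per parallel sample, and the point forecast is their mean over the sample axis (the sample count and horizon below are illustrative).

import torch

sequences = torch.randn(64, 100, 24)     # (batch, num_parallel_samples, prediction_length)
mean_prediction = sequences.mean(dim=1)  # (64, 24)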
| 660 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Optional[int] = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Union[str, Any] = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 613 | '''simple docstring'''
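Before the next file, a minimal sketch of the lazy-module pattern the git __init__ above relies on — not transformers' actual _LazyModule, just the PEP 562 idea of deferring submodule imports until first attribute access (names taken from the import structure above):

import importlib

_LAZY = {"GitConfig": ".configuration_git", "GitProcessor": ".processing_git"}

def __getattr__(name):  # module-level, inside a package __init__
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")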
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
__snake_case : Tuple = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
__snake_case : Dict = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def lowerCamelCase__ ( A_ , A_ ):
with open(A_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase_ = json.loads(f.read() )
UpperCAmelCase_ = collections.OrderedDict()
UpperCAmelCase_ = collections.OrderedDict()
UpperCAmelCase_ = collections.OrderedDict()
with open(A_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(A_ ):
UpperCAmelCase_ = b
UpperCAmelCase_ = idx
for wd in b:
UpperCAmelCase_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class lowercase_ ( _A ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|startoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , **UpperCamelCase__ , ) -> int:
"""simple docstring"""
super().__init__(
unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , do_clean_text=UpperCamelCase__ , **UpperCamelCase__ , )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(
F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(
F"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
UpperCAmelCase_ = do_clean_text
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = load_vocab_and_emoji(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return len(self.raw_vocab )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(UpperCamelCase__ , clean=self.do_clean_text )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> int:
"""simple docstring"""
return self.vocab.get(UpperCamelCase__ , self.vocab.get(self.unk_token ) )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = "".join(UpperCamelCase__ ).strip()
return out_string
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
UpperCAmelCase_ = input_ids[-self.model_max_length :]
return input_ids
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase_ = 0
if os.path.isdir(UpperCamelCase__ ):
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
UpperCAmelCase_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
UpperCAmelCase_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
UpperCAmelCase_ = token_index
writer.write(",".join(UpperCamelCase__ ) + "\n" )
index += 1
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , UpperCamelCase__ )
return vocab_file, emoji_file
class lowercase_ ( _A ):
def __init__( self , vocab , ids_to_tokens , emoji ) -> None:
    """simple docstring"""
    self.vocab = vocab  # same as swe
    self.ids_to_tokens = ids_to_tokens  # same as bpe
    self.emoji = emoji
    self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
    # Six distinct filters used by clean_text below; they previously collapsed onto a single name.
    self.content_repatter_url = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
    self.content_repatter_email = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
    self.content_repatter_tel = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
    self.content_repatter_date = re.compile(
        R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
    self.content_repatter_era_date = re.compile(
        R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
    self.content_repatter_price = re.compile(
        R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
    keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
    blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
    self.content_transa = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ) -> int:
"""simple docstring"""
return len(self.ids_to_tokens )
def clean_text( self , content ) -> str:
    """simple docstring"""
    content = self.content_repatter_url.sub("<URL>" , content )
    content = self.content_repatter_email.sub("<EMAIL>" , content )
    content = self.content_repatter_tel.sub("<TEL>" , content )
    content = self.content_repatter_date.sub("<DATE>" , content )
    content = self.content_repatter_era_date.sub("<DATE>" , content )
    content = self.content_repatter_price.sub("<PRICE>" , content )
    content = content.translate(self.content_transa )
    while "<BLOCK><BLOCK>" in content:
        content = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
    return content
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__=False ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = text.replace(" " , "<SP>" )
UpperCAmelCase_ = text.replace(" " , "<SP>" )
UpperCAmelCase_ = text.replace("\r\n" , "<BR>" )
UpperCAmelCase_ = text.replace("\n" , "<BR>" )
UpperCAmelCase_ = text.replace("\r" , "<BR>" )
UpperCAmelCase_ = text.replace("\t" , "<TAB>" )
UpperCAmelCase_ = text.replace("—" , "ー" )
UpperCAmelCase_ = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
UpperCAmelCase_ = text.replace(UpperCamelCase__ , UpperCamelCase__ )
if clean:
UpperCAmelCase_ = self.clean_text(UpperCamelCase__ )
def check_simbol(UpperCamelCase__ ):
UpperCAmelCase_ = x.encode()
if len(x ) == 1 and len(e ) == 2:
UpperCAmelCase_ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc_2_a_1 and c <= 0Xc_2_b_f)
or (c >= 0Xc_7_8_0 and c <= 0Xc_7_8_3)
or (c >= 0Xc_a_b_9 and c <= 0Xc_b_b_f)
or (c >= 0Xc_c_8_0 and c <= 0Xc_d_a_2)
):
return True
return False
def checkuae(UpperCamelCase__ ):
UpperCAmelCase_ = x.encode()
if len(x ) == 1 and len(e ) == 3:
UpperCAmelCase_ = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe_2_8_0_8_0 and c <= 0Xe_2_b_0_7_f:
return True
return False
UpperCAmelCase_ = 0
UpperCAmelCase_ = []
while pos < len(UpperCamelCase__ ):
UpperCAmelCase_ = min(len(UpperCamelCase__ ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
UpperCAmelCase_ = [] # (token_id, token, pos)
for e in range(UpperCamelCase__ , UpperCamelCase__ , -1 ):
UpperCAmelCase_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(UpperCamelCase__ ) > 2:
UpperCAmelCase_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(UpperCamelCase__ ) > 0:
# the smallest token_id is adopted
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[0] )[0]
result.append(UpperCamelCase__ )
UpperCAmelCase_ = e
else:
UpperCAmelCase_ = pos + 1
UpperCAmelCase_ = text[pos:end]
if check_simbol(UpperCamelCase__ ):
result.append("<KIGOU>" )
elif checkuae(UpperCamelCase__ ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
UpperCAmelCase_ = end
return result
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__="\n" ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(UpperCamelCase__ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = "".join(UpperCamelCase__ )
return text
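The byte-fallback round trip used above, in isolation: characters with no vocabulary entry are emitted as <|byteN|> tokens and reassembled on decode with bytearray, mirroring the word[6:-2] slice in convert_id_to_token.

text = "猫"
byte_tokens = ["<|byte%d|>" % b for b in text.encode("utf-8")]
# ['<|byte231|>', '<|byte140|>', '<|byte171|>']
recovered = bytearray(int(t[6:-2]) for t in byte_tokens).decode("utf-8", errors="replace")
assert recovered == text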
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
RADIX = 10

def _lowercase ( list_of_ints: list[int] ) -> list[int]:
    # LSD radix sort: bucket by each decimal digit, least significant first,
    # until every digit of the maximum element has been consumed.
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list[int]] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
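A quick self-check of the repaired sort against the built-in (input values are arbitrary non-negative integers, which is all an LSD radix sort handles):

import random

data = [random.randrange(10_000) for _ in range(100)]
assert _lowercase(list(data)) == sorted(data)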
| 680 | '''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__snake_case : Union[str, Any] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/diffusers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i: i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 660 | 0 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
"""simple docstring"""
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types( self ):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        """simple docstring"""
        mse = mean_squared_error(
            predictions , references , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
return {"mse": mse}
| 470 | '''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig( datasets.BuilderConfig ):
    batch_size = 1_0000
    columns = None
    features = None
class Parquet( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info( self ) -> datasets.DatasetInfo:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , "rb" ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table( self , pa_table ) -> pa.Table:
        """simple docstring"""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ):
        """simple docstring"""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , "rb" ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F"""{file_idx}_{batch_idx}""", self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(F"""Failed to read file '{file}' with error {type(e )}: {e}""" )
raise
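# Usage sketch (added; not part of the builder itself). This builder backs the
# "parquet" loader in `datasets`, e.g.:
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "train.parquet"})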
| 660 | 0 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
args = parser.parse_args()
device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
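# Example invocation (added; the script name is assumed and the model path
# above is a placeholder you must set):
#   python inference_bf16.py --dpm --steps 20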
| 126 | '''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''AI-Sweden/gpt-sw3-126m''': 20_48,
'''AI-Sweden/gpt-sw3-350m''': 20_48,
'''AI-Sweden/gpt-sw3-1.6b''': 20_48,
'''AI-Sweden/gpt-sw3-6.7b''': 20_48,
'''AI-Sweden/gpt-sw3-20b''': 20_48,
}
class lowercase_ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs = None , **kwargs , ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("name_or_path" )
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored" )
            name_or_path = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F"""[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" )
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.sp_model )
    def preprocess_text( self , text ) -> str:
        """simple docstring"""
        text = self.non_printing_characters_re.sub("" , text )
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC" , text )
        return text
    def _tokenize( self , text , **kwargs ) -> List[str]:
        """simple docstring"""
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        """simple docstring"""
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ) -> str:
        """simple docstring"""
        return self.sp_model.IdToPiece(index )
@staticmethod
    def clean_up_tokenization( out_string ) -> str:
        """simple docstring"""
        return out_string
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string
    def get_vocab( self ) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
    def encode_fast( self , text , return_tensors = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """simple docstring"""
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
return token_ids
    def decode_fast( self , token_ids ) -> str:
        """simple docstring"""
        return self.sp_model.decode(token_ids )
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        """simple docstring"""
        all_responses = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        prompt = (
            F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(all_responses ) + F"""{self.bos_token}Bot:"""
        )
        return self.encode(text=prompt )
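# Usage sketch (added for illustration; loading the checkpoint downloads real
# model files):
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   tok("Träd är fina")["input_ids"]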
| 660 | 0 |
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch( gpt2_checkpoint_path , gpt2_config_file , pytorch_dump_folder_path ) -> None:
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file )
    model = GPT2Model(config )
    # Load weights from numpy
    load_tf_weights_in_gpt2(model , config , gpt2_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    print(f'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
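    # Example CLI call (added; the script name and paths are placeholders):
    #   python convert_gpt2_checkpoint.py --gpt2_checkpoint_path ./model.ckpt \
    #       --pytorch_dump_folder_path ./out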
| 269 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> None:
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 1_8}
        crop_size = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ) -> None:
        """simple docstring"""
        self.image_processor_tester = LevitImageProcessingTester(self )
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 1_8} )
        self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
        self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
        self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
    def test_batch_feature( self ) -> None:
"""simple docstring"""
pass
    def test_call_pil( self ) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_numpy( self ) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_pytorch( self ) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 660 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class lowercase__ ( Trainer ):
'''simple docstring'''
    def __init__( self , config=None , data_args=None , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f""" {self.model.__class__}"""
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
""" padding..""" )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self , num_training_steps ):
        if self.optimizer is None:
            no_decay = ["""bias""", """LayerNorm.weight"""]
            optimizer_grouped_parameters = [
                {
                    """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    """weight_decay""": self.args.weight_decay,
                },
                {
                    """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    """weight_decay""": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"""scale_parameter""": False, """relative_step""": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    """betas""": (self.args.adam_beta1, self.args.adam_beta2),
                    """eps""": self.args.adam_epsilon,
                }
            optimizer_kwargs["""lr"""] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
    def _get_lr_scheduler( self , num_training_steps ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
return scheduler
    def _get_train_sampler( self ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self , model , inputs , labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss, logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss, _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
    def compute_loss( self , model , inputs ):
        labels = inputs.pop("""labels""" )
        loss, _ = self._compute_loss(model , inputs , labels )
return loss
    def prediction_step( self , model , inputs , prediction_loss_only , ignore_keys = None , ):
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["""max_length"""] )
        labels = inputs.pop("""labels""" )
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["""max_length"""] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self , tensor , max_length ):
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
                f""" padded to `max_length`={max_length}""" )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor
| 533 | '''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length( fnc: Callable[[float], float] , x_start: float , x_end: float , steps: int = 100 , ) -> float:
    x1 = x_start
    fx1 = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        length += math.hypot(x2 - x1 , fx2 - fx1 )
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f( x ):
        return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
    i = 10
while i <= 10_00_00:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
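    # Sanity check (added): the straight line y = x from 0 to 1 has exact
    # length sqrt(2) ≈ 1.4142136, which the approximation reproduces closely.
    print(line_length(lambda x: x, 0, 1, 1_000))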
| 660 | 0 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`"warn"`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `"warn"`.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        score = recall_score(
            predictions , references , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 234 | '''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowercase_ ( AbstractFileSystem ):
    root_marker = """"""
    protocol = """hf-legacy"""  # "hf://"" is reserved for hffs
    def __init__( self , repo_info = None , token = None , **kwargs , ):
        """simple docstring"""
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs( self ):
        """simple docstring"""
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d ): {"name": str(d ), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
    def _open( self , path , mode = "rb" , **kwargs , ):
        """simple docstring"""
        if not isinstance(self.repo_info , DatasetInfo ):
            raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
        url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
        return fsspec.open(
            url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
    def info( self , path , **kwargs ):
        """simple docstring"""
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self , path , detail=False , **kwargs ):
        """simple docstring"""
        self._get_dirs()
        path = PurePosixPath(path.strip("/" ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/" ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(_A )
class lowerCamelCase (Pipeline ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , 'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters( self , **kwargs ):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['threshold'] = kwargs['threshold']
        return {}, {}, postprocess_kwargs
    def __call__( self , *args , **kwargs ) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args , **kwargs )
    def preprocess( self , image ):
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors='pt' )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
        inputs['target_size'] = target_size
        return inputs
    def _forward( self , model_inputs ):
        target_size = model_inputs.pop('target_size' )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({'target_size': target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs['bbox'] = model_inputs['bbox']
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.9 ):
        target_size = model_outputs['target_size']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()
            def unnormalize(bbox ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1_0_0_0),
(height * bbox[1] / 1_0_0_0),
(width * bbox[2] / 1_0_0_0),
(height * bbox[3] / 1_0_0_0),
] ) )
            scores, classes = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs['bbox'].squeeze(0 )]
            keys = ['score', 'label', 'box']
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['scores']
            labels = raw_annotation['labels']
            boxes = raw_annotation['boxes']
            raw_annotation['scores'] = scores.tolist()
            raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['boxes'] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['score', 'label', 'box']
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
            ]
return annotation
    def _get_bounding_box( self , box ) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
return bbox
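# Usage sketch (added; not part of the pipeline module itself):
#   from transformers import pipeline
#   detector = pipeline("object-detection")
#   detector("path/to/image.jpg")  # -> [{"score": ..., "label": ..., "box": {...}}, ...]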
| 406 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_plbart'''] = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_plbart'''] = [
        '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PLBartForCausalLM''',
        '''PLBartForConditionalGeneration''',
        '''PLBartForSequenceClassification''',
        '''PLBartModel''',
        '''PLBartPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 660 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__ ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self , image_processor , tokenizer):
        """simple docstring"""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer)
        self.current_processor = self.image_processor
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        """simple docstring"""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs)
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 77 | '''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
class SchedulerType( Enum ):
    LINEAR = """linear"""
    COSINE = """cosine"""
    COSINE_WITH_RESTARTS = """cosine_with_restarts"""
    POLYNOMIAL = """polynomial"""
    CONSTANT = """constant"""
    CONSTANT_WITH_WARMUP = """constant_with_warmup"""
    PIECEWISE_CONSTANT = """piecewise_constant"""
def get_constant_schedule( optimizer , last_epoch = -1 ):
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup( optimizer , num_warmup_steps , last_epoch = -1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule( optimizer , step_rules , last_epoch = -1 ):
    rules_dict = {}
    rule_list = step_rules.split("," )
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":" )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=-1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 0.5 , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(A_ ) * 2.0 * progress )) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 1 , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(A_ ) * progress) % 1.0) )) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=1e-7 , A_=1.0 , A_=-1 ):
UpperCAmelCase_ = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
UpperCAmelCase_ = lr_init - lr_end
UpperCAmelCase_ = num_training_steps - num_warmup_steps
UpperCAmelCase_ = 1 - (current_step - num_warmup_steps) / decay_steps
UpperCAmelCase_ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(A_ , A_ , A_ )
__snake_case : str = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( A_ , A_ , A_ = None , A_ = None , A_ = None , A_ = 1 , A_ = 1.0 , A_ = -1 , ):
UpperCAmelCase_ = SchedulerType(A_ )
UpperCAmelCase_ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(A_ , last_epoch=A_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(A_ , step_rules=A_ , last_epoch=A_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(A_ , num_warmup_steps=A_ , last_epoch=A_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , num_cycles=A_ , last_epoch=A_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , power=A_ , last_epoch=A_ , )
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , last_epoch=A_ )
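# --- Editor's note: hedged usage sketch for the factory above, not part of the
# original module. Linear warmup for 10 steps, then linear decay to zero:
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
    for step in range(100):
        optimizer.step()
        scheduler.step()
        if step in (0, 9, 99):
            print(step, scheduler.get_last_lr())  # lr ramps up, then decays toward 0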
| 660 | 0 |
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
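# --- Editor's note: hedged helper for trying this file outside its home repo,
# where `maths.prime_check.is_prime` may not be importable; a trial-division
# stand-in with the same contract:
def _is_prime_standin(n: int) -> bool:
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
# e.g. twin_prime(5) -> 7 (5 and 7 are twin primes), twin_prime(7) -> -1 (9 is composite).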
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case : Optional[int] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__snake_case : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
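# --- Editor's note: hedged illustration of the guard pattern above, not part of
# the module. Each backend block registers its symbols only when the optional
# dependency imports cleanly; a missing framework prunes them silently:
_demo_import_structure = {"configuration": ["DemoConfig"]}  # illustrative names
try:
    import torch  # noqa: F401
except ImportError:
    pass  # torch-only classes are simply not registered
else:
    _demo_import_structure["modeling"] = ["DemoModel"]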
| 660 | 0 |
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
def lowerCamelCase ( self , snake_case_ ):
'''simple docstring'''
if text is None:
return None
A__ : Union[str, Any] = self.tokenize(UpperCamelCase__ )
A__ , A__ : Dict = """""", []
for i, ch in enumerate(UpperCamelCase__ ):
if ch in self.SP_CHAR_MAPPING:
A__ : Optional[Any] = self.SP_CHAR_MAPPING.get(UpperCamelCase__ )
else:
A__ : Optional[Any] = unicodedata.normalize("""NFKC""" , UpperCamelCase__ )
if self.is_whitespace(UpperCamelCase__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(UpperCamelCase__ ) )
A__ , A__ , A__ : Tuple = normalized_text, [], 0
if self.do_lower_case:
A__ : int = text.lower()
for token in split_tokens:
if token[:1] == "▁":
A__ : Tuple = token[1:]
A__ : str = text[offset:].index(UpperCamelCase__ ) + offset
A__ : Dict = start + len(UpperCamelCase__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
A__ : str = end
return token_mapping
    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.vocab)

    def get_vocab(self):
        '''simple docstring'''
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def get_cleaned_text(self, text):
        '''simple docstring'''
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
def lowerCamelCase ( self , snake_case_ , snake_case_=False , snake_case_=64 , snake_case_=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
A__ : Any = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
A__ : Dict = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
A__ : List[Any] = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
A__ : Tuple = self.sp_model.EncodeAsPieces(UpperCamelCase__ )
else:
A__ : str = self.sp_model.SampleEncodeAsPieces(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A__ : int = []
for pi, piece in enumerate(UpperCamelCase__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(UpperCamelCase__ ) and pi != 0:
new_pieces.append(UpperCamelCase__ )
continue
else:
continue
A__ : int = 0
for i, chunk in enumerate(UpperCamelCase__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(UpperCamelCase__ ) or self.is_punct(UpperCamelCase__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(UpperCamelCase__ )
A__ : Any = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
A__ : str = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
A__ : int = i
if len(UpperCamelCase__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        '''simple docstring'''
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        '''simple docstring'''
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        '''simple docstring'''
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        '''simple docstring'''
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        '''simple docstring'''
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        '''simple docstring'''
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        '''simple docstring'''
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        """ Please check that the vocabulary is not corrupted!""")
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
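# --- Editor's note: hedged arithmetic check, not part of the tokenizer. ErnieM
# packs a pair as [CLS] A [SEP] [SEP] B [SEP]; the token-type ids built above
# must cover exactly that layout:
if __name__ == "__main__":
    a_ids, b_ids = [11, 12], [21, 22, 23]
    pair_len = len(a_ids) + len(b_ids) + 4  # four special tokens in the pair layout
    type_ids = [0] * (len(a_ids) + 1) + [1] * (len(b_ids) + 3)
    assert len(type_ids) == pair_len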
| 363 | '''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"""getting tweets before {oldest}""")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"""...{len(alltweets)} tweets downloaded so far""")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"""new_{screen_name}_tweets.csv""", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
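# --- Editor's note: hedged alternative, assuming tweepy's Cursor paginator over
# the v1.1 user_timeline endpoint (availability depends on your API access tier):
def get_all_tweets_with_cursor(screen_name: str) -> list:
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    return [
        [status.id_str, status.created_at, status.text]
        for status in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items()
    ]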
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 660 | 0 |
from __future__ import annotations
import math
class SegmentTree:
    '''simple docstring'''

    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        return idx * 2

    def right(self, idx):
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx, left_element, right_element, a, b, val):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx, left_element, right_element, a, b):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
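    # --- Editor's note: hedged sanity check, not part of the original demo. The
    # lazy tree must agree with a naive range-max after the two assignments above:
    naive = [111, 111, 111] + A[3:6] + [235, 235] + A[8:]
    assert segt.query(1, 1, size, 4, 6) == max(naive[3:6]) == 7
    assert segt.query(1, 1, size, 1, 15) == max(naive) == 235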
    print(segt)
| 613 | '''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends

if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup

logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        """simple docstring"""
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """simple docstring"""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """simple docstring"""
        html_code = BeautifulSoup(html_string, "html.parser")
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)
        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")
        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """simple docstring"""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"""/{tagname}"""
            if subs != 0:
                xpath += f"""[{subs}]"""
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        """simple docstring"""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"""but is of type {type(html_strings)}.""")
        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)
        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
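# --- Editor's note: hedged usage sketch; runs only inside the transformers
# source tree (relative imports) and needs the `beautifulsoup4` package:
if __name__ == "__main__":
    extractor = MarkupLMFeatureExtractor()
    features = extractor("<html><body><p>hello</p><p>world</p></body></html>")
    print(features["nodes"])   # [['hello', 'world']]
    print(features["xpaths"])  # [['/html/body/p[1]', '/html/body/p[2]']]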
| 660 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    '''simple docstring'''

    def setUp(self):
        """simple docstring"""
        self.tool = load_tool("""text-classification""")
        self.tool.setup()
        self.remote_tool = load_tool("""text-classification""", remote=True)

    def test_exact_match_arg(self):
        """simple docstring"""
        result = self.tool("""That's quite cool""", ["""positive""", """negative"""])
        self.assertEqual(result, """positive""")

    def test_exact_match_arg_remote(self):
        """simple docstring"""
        result = self.remote_tool("""That's quite cool""", ["""positive""", """negative"""])
        self.assertEqual(result, """positive""")

    def test_exact_match_kwarg(self):
        """simple docstring"""
        result = self.tool(text="""That's quite cool""", labels=["""positive""", """negative"""])
        self.assertEqual(result, """positive""")

    def test_exact_match_kwarg_remote(self):
        """simple docstring"""
        result = self.remote_tool(text="""That's quite cool""", labels=["""positive""", """negative"""])
        self.assertEqual(result, """positive""")
| 680 | '''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"""{type(item).__name__}"""
                    )
                    raise TypeError(msg)
        else:
            msg = f"""Expected a list of numbers as input, found {type(point).__name__}"""
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
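# --- Editor's note: hedged cross-check against numpy (assumed installed), not
# part of the original algorithm file:
if __name__ == "__main__":
    import numpy as np

    a, b = [1.0, 1.0], [9.0, 9.0]
    assert manhattan_distance(a, b) == float(np.abs(np.asarray(a) - np.asarray(b)).sum()) == 16.0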
| 660 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
lowercase_ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def A_ ( lowercase ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[str] = {}
with open(A_ , """r""" ) as file:
for line_number, line in enumerate(A_ ):
UpperCAmelCase_ : Optional[int] = line.strip()
if line:
UpperCAmelCase_ : List[str] = line.split()
UpperCAmelCase_ : Optional[Any] = line_number
UpperCAmelCase_ : List[str] = words[0]
UpperCAmelCase_ : Any = value
return result
def A_ ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split(""".""" ):
UpperCAmelCase_ : Union[str, Any] = getattr(A_ , A_ )
UpperCAmelCase_ : List[str] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(A_ ):
UpperCAmelCase_ : Any = PARAM_MAPPING[full_name.split(""".""" )[-1]]
UpperCAmelCase_ : Tuple = """param"""
if weight_type is not None and weight_type != "param":
UpperCAmelCase_ : Optional[int] = getattr(A_ , A_ ).shape
elif weight_type is not None and weight_type == "param":
UpperCAmelCase_ : Union[str, Any] = hf_pointer
for attribute in hf_param_name.split(""".""" ):
UpperCAmelCase_ : int = getattr(A_ , A_ )
UpperCAmelCase_ : Optional[int] = shape_pointer.shape
# let's reduce dimension
UpperCAmelCase_ : Tuple = value[0]
else:
UpperCAmelCase_ : str = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCAmelCase_ : Optional[int] = value
elif weight_type == "weight_g":
UpperCAmelCase_ : Dict = value
elif weight_type == "weight_v":
UpperCAmelCase_ : int = value
elif weight_type == "bias":
UpperCAmelCase_ : Any = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
UpperCAmelCase_ : Union[str, Any] = getattr(A_ , A_ )
UpperCAmelCase_ : int = value
else:
UpperCAmelCase_ : Dict = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def A_ ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[str] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(A_ ):
UpperCAmelCase_ : int = PARAM_MAPPING[full_name.split(""".""" )[-1]]
UpperCAmelCase_ : Dict = """param"""
if weight_type is not None and weight_type != "param":
UpperCAmelCase_ : int = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCAmelCase_ : Dict = """.""".join([key, hf_param_name] )
else:
UpperCAmelCase_ : int = key
UpperCAmelCase_ : List[Any] = value if """lm_head""" in full_key else value[0]
lowercase_ = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def A_ ( lowercase , lowercase , lowercase=None , lowercase=None ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = False
for key, mapped_key in MAPPING.items():
UpperCAmelCase_ : List[Any] = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
UpperCAmelCase_ : List[Any] = True
if "*" in mapped_key:
UpperCAmelCase_ : Union[str, Any] = name.split(A_ )[0].split(""".""" )[-2]
UpperCAmelCase_ : str = mapped_key.replace("""*""" , A_ )
if "weight_g" in name:
UpperCAmelCase_ : Optional[int] = """weight_g"""
elif "weight_v" in name:
UpperCAmelCase_ : str = """weight_v"""
elif "bias" in name:
UpperCAmelCase_ : List[Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase_ : int = """weight"""
else:
UpperCAmelCase_ : Optional[int] = None
if hf_dict is not None:
rename_dict(A_ , A_ , A_ , A_ , A_ )
else:
set_recursively(A_ , A_ , A_ , A_ , A_ )
return is_used
return is_used
def A_ ( lowercase , lowercase , lowercase ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : Dict = fairseq_model.state_dict()
UpperCAmelCase_ : List[Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase_ : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
A_ , A_ , A_ , A_ , hf_model.config.feat_extract_norm == """group""" , )
UpperCAmelCase_ : int = True
else:
UpperCAmelCase_ : Tuple = load_wavaveca_layer(A_ , A_ , A_ )
if not is_used:
unused_weights.append(A_ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def A_ ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = full_name.split("""conv_layers.""" )[-1]
UpperCAmelCase_ : List[Any] = name.split(""".""" )
UpperCAmelCase_ : Dict = int(items[0] )
UpperCAmelCase_ : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCAmelCase_ : int = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCAmelCase_ : int = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCAmelCase_ : Optional[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCAmelCase_ : Optional[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(A_ )
@torch.no_grad()
def A_ ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True , lowercase=False ) -> str:
"""simple docstring"""
if config_path is not None:
UpperCAmelCase_ : Any = WavaVecaConfig.from_pretrained(A_ )
else:
UpperCAmelCase_ : Tuple = WavaVecaConfig()
if is_seq_class:
UpperCAmelCase_ : int = read_txt_into_dict(A_ )
UpperCAmelCase_ : List[str] = idalabel
UpperCAmelCase_ : Any = WavaVecaForSequenceClassification(A_ )
UpperCAmelCase_ : List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=A_ , return_attention_mask=A_ , )
feature_extractor.save_pretrained(A_ )
elif is_finetuned:
if dict_path:
UpperCAmelCase_ : Optional[int] = Dictionary.load(A_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase_ : Dict = target_dict.pad_index
UpperCAmelCase_ : List[str] = target_dict.bos_index
UpperCAmelCase_ : Union[str, Any] = target_dict.eos_index
UpperCAmelCase_ : Optional[int] = len(target_dict.symbols )
UpperCAmelCase_ : Tuple = os.path.join(A_ , """vocab.json""" )
if not os.path.isdir(A_ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(A_ ) )
return
os.makedirs(A_ , exist_ok=A_ )
UpperCAmelCase_ : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : List[str] = 1
with open(A_ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(A_ , A_ )
UpperCAmelCase_ : Dict = WavaVecaCTCTokenizer(
A_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=A_ , )
UpperCAmelCase_ : Optional[Any] = True if config.feat_extract_norm == """layer""" else False
UpperCAmelCase_ : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=A_ , return_attention_mask=A_ , )
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessor(feature_extractor=A_ , tokenizer=A_ )
processor.save_pretrained(A_ )
UpperCAmelCase_ : int = WavaVecaForCTC(A_ )
else:
UpperCAmelCase_ : List[Any] = WavaVecaForPreTraining(A_ )
if is_finetuned or is_seq_class:
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
UpperCAmelCase_ : Optional[int] = argparse.Namespace(task="""audio_pretraining""" )
UpperCAmelCase_ : List[str] = fairseq.tasks.setup_task(A_ )
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A_ )
UpperCAmelCase_ : Union[str, Any] = model[0].eval()
recursively_load_weights(A_ , A_ , not is_finetuned )
hf_wavavec.save_pretrained(A_ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
lowercase_ = parser.parse_args()
lowercase_ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
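# --- Editor's note: hedged illustration of the wildcard key-renaming idea used
# by the converter above, as a tiny standalone helper (the demo mapping entry
# below is made up, not the script's real MAPPING):
import re


def rename_state_dict_key(key, mapping):
    # '*' in a pattern stands for the layer index and is carried over to the target
    for src, dst in mapping.items():
        pattern = "^" + re.escape(src).replace(r"\*", r"(\d+)") + "$"
        match = re.match(pattern, key)
        if match:
            return dst.replace("*", match.group(1)) if match.groups() else dst
    return key


assert (
    rename_state_dict_key("layers.3.fc1.weight", {"layers.*.fc1.weight": "encoder.layers.*.intermediate_dense.weight"})
    == "encoder.layers.3.intermediate_dense.weight"
)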
| 470 | '''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__snake_case : Optional[int] = logging.getLogger(__name__)
def lowerCamelCase__ ( A_ , A_ ):
# save results
if os.path.exists(A_ ):
if os.path.exists(os.path.join(A_ , "config.json" ) ) and os.path.isfile(
os.path.join(A_ , "config.json" ) ):
os.remove(os.path.join(A_ , "config.json" ) )
if os.path.exists(os.path.join(A_ , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(A_ , "pytorch_model.bin" ) ):
os.remove(os.path.join(A_ , "pytorch_model.bin" ) )
else:
os.makedirs(A_ )
model.save_pretrained(A_ )
def lowerCamelCase__ ( A_ , A_=False ):
UpperCAmelCase_ = 2
if unlogit:
UpperCAmelCase_ = torch.pow(A_ , A_ )
UpperCAmelCase_ = p * torch.log(A_ )
UpperCAmelCase_ = 0
return -plogp.sum(dim=-1 )
def lowerCamelCase__ ( A_ ):
logger.info("lv, h >\t" + "\t".join(F"""{x + 1}""" for x in range(len(A_ ) ) ) )
for row in range(len(A_ ) ):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=True , A_=True , A_=None , A_=False ):
UpperCAmelCase_ , UpperCAmelCase_ = model.config.num_hidden_layers, model.config.num_attention_heads
UpperCAmelCase_ = torch.zeros(A_ , A_ ).to(args.device )
UpperCAmelCase_ = torch.zeros(A_ , A_ ).to(args.device )
if head_mask is None:
UpperCAmelCase_ = torch.ones(A_ , A_ ).to(args.device )
head_mask.requires_grad_(requires_grad=A_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
UpperCAmelCase_ = None
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = 0.0
for step, inputs in enumerate(tqdm(A_ , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
UpperCAmelCase_ = tuple(t.to(args.device ) for t in inputs )
((UpperCAmelCase_) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
UpperCAmelCase_ = model(A_ , labels=A_ , head_mask=A_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(A_ ):
UpperCAmelCase_ = entropy(attn.detach() , A_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(A_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
UpperCAmelCase_ = 2
UpperCAmelCase_ = torch.pow(torch.pow(A_ , A_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
UpperCAmelCase_ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(A_ )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(A_ )
logger.info("Head ranked by importance scores" )
UpperCAmelCase_ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
UpperCAmelCase_ = torch.arange(
head_importance.numel() , device=args.device )
UpperCAmelCase_ = head_ranks.view_as(A_ )
print_ad_tensor(A_ )
return attn_entropy, head_importance, total_loss
def lowerCamelCase__ ( A_ , A_ , A_ ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(A_ , A_ , A_ , compute_entropy=A_ )
UpperCAmelCase_ = 1 / loss # instead of downsteam score use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , A_ , original_score * args.masking_threshold )
UpperCAmelCase_ = torch.ones_like(A_ )
UpperCAmelCase_ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
UpperCAmelCase_ = original_score
while current_score >= original_score * args.masking_threshold:
UpperCAmelCase_ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
UpperCAmelCase_ = float("Inf" )
UpperCAmelCase_ = head_importance.view(-1 ).sort()[1]
if len(A_ ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
UpperCAmelCase_ = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
UpperCAmelCase_ = new_head_mask.view(-1 )
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = new_head_mask.view_as(A_ )
UpperCAmelCase_ = new_head_mask.clone().detach()
print_ad_tensor(A_ )
# Compute metric and head importance again
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , head_mask=A_ )
UpperCAmelCase_ = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , A_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(A_ )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCamelCase__ ( A_ , A_ , A_ , A_ ):
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(A_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(A_ , A_ ):
UpperCAmelCase_ = [
v,
]
assert sum(len(A_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(A_ )
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ , actually_pruned=A_ , )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , A_ , A_ , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , A_ , A_ )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(A_ , args.output_dir )
def lowerCamelCase__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=A_ , type=A_ , required=A_ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=A_ , type=A_ , required=A_ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=A_ , type=A_ , required=A_ , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=A_ , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=A_ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=A_ , type=A_ , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=A_ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=A_ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=A_ , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=A_ , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=A_ , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=A_ , help="Batch size." )
parser.add_argument("--seed" , type=A_ , default=42 )
parser.add_argument("--local_rank" , type=A_ , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=A_ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=A_ , default="" , help="Can be used for distant debugging." )
UpperCAmelCase_ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
UpperCAmelCase_ = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
UpperCAmelCase_ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
UpperCAmelCase_ = torch.device("cuda" , args.local_rank )
UpperCAmelCase_ = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
UpperCAmelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
UpperCAmelCase_ = nn.parallel.DistributedDataParallel(
A_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=A_ )
elif args.n_gpu > 1:
UpperCAmelCase_ = nn.DataParallel(A_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=A_ )
torch.save(A_ , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , A_ )
# Prepare dataset
UpperCAmelCase_ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
UpperCAmelCase_ = (torch.from_numpy(A_ ),)
UpperCAmelCase_ = TensorDataset(*A_ )
UpperCAmelCase_ = RandomSampler(A_ )
UpperCAmelCase_ = DataLoader(A_ , sampler=A_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(A_ , A_ , A_ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCAmelCase_ = mask_heads(A_ , A_ , A_ )
prune_heads(A_ , A_ , A_ , A_ )
if __name__ == "__main__":
main()
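# --- Editor's note: hedged mini-example of the attention-entropy idea used by
# the script above, not part of the original file. A uniform attention row
# scores log(n); a one-hot row scores ~0 (eps avoids log(0)):
def _demo_entropy(p, eps=1e-12):
    return -(p * (p + eps).log()).sum(dim=-1)

# _demo_entropy(torch.full((4,), 0.25)) -> ~1.386 (= ln 4)
# _demo_entropy(torch.tensor([1.0, 0.0, 0.0, 0.0])) -> ~0.0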
| 660 | 0 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _readaa(bytestream) -> int:
    # reads a big-endian uint32 from the byte stream
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_images(f):
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name))
        num_images = _readaa(bytestream)
        rows = _readaa(bytestream)
        cols = _readaa(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, 'Please use tf.one_hot on tensors.')
def _dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_labels(f, one_hot=False, num_classes=10):
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name))
        num_items = _readaa(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class UpperCAmelCase :
@deprecated(
UpperCamelCase__ , 'Please use alternatives such as official/mnist/_DataSet.py'
' from tensorflow/models.' , )
def __init__( self , __A , __A , __A=False , __A=False , __A=dtypes.floataa , __A=True , __A=None , ):
__UpperCAmelCase , __UpperCAmelCase = random_seed.get_seed(UpperCamelCase__ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
__UpperCAmelCase = dtypes.as_dtype(UpperCamelCase__ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
if fake_data:
__UpperCAmelCase = 10_000
__UpperCAmelCase = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'images.shape: {images.shape} labels.shape: {labels.shape}'
__UpperCAmelCase = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__UpperCAmelCase = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__UpperCAmelCase = images.astype(numpy.floataa )
__UpperCAmelCase = numpy.multiply(UpperCamelCase__ , 1.0 / 2_5_5.0 )
__UpperCAmelCase = images
__UpperCAmelCase = labels
__UpperCAmelCase = 0
__UpperCAmelCase = 0
@property
def __lowerCamelCase ( self ):
return self._images
@property
def __lowerCamelCase ( self ):
return self._labels
@property
def __lowerCamelCase ( self ):
return self._num_examples
@property
def __lowerCamelCase ( self ):
return self._epochs_completed
def __lowerCamelCase ( self , __A , __A=False , __A=True ):
if fake_data:
__UpperCAmelCase = [1] * 784
__UpperCAmelCase = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(UpperCamelCase__ )],
[fake_label for _ in range(UpperCamelCase__ )],
)
__UpperCAmelCase = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__UpperCAmelCase = numpy.arange(self._num_examples )
numpy.random.shuffle(UpperCamelCase__ )
__UpperCAmelCase = self.images[perma]
__UpperCAmelCase = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the remaining examples in this epoch
__UpperCAmelCase = self._num_examples - start
__UpperCAmelCase = self._images[start : self._num_examples]
__UpperCAmelCase = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__UpperCAmelCase = numpy.arange(self._num_examples )
numpy.random.shuffle(UpperCamelCase__ )
__UpperCAmelCase = self.images[perm]
__UpperCAmelCase = self.labels[perm]
# Start next epoch
__UpperCAmelCase = 0
__UpperCAmelCase = batch_size - rest_num_examples
__UpperCAmelCase = self._index_in_epoch
__UpperCAmelCase = self._images[start:end]
__UpperCAmelCase = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
__UpperCAmelCase = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
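# NOTE (editorial sketch): the batching method above stitches batches across
# epoch boundaries: when fewer than `batch_size` examples remain, it takes the
# remainder, reshuffles, and tops the batch up from the new epoch. The same
# logic without class state (hypothetical names):
def _next_batch_sketch(images, labels, start, batch_size):
    """Return (batch_images, batch_labels, new_start), crossing epochs if needed."""
    n = images.shape[0]
    if start + batch_size <= n:
        end = start + batch_size
        return images[start:end], labels[start:end], end
    rest_images, rest_labels = images[start:], labels[start:]  # tail of this epoch
    perm = numpy.random.permutation(n)  # reshuffle for the next epoch
    images, labels = images[perm], labels[perm]
    new = batch_size - (n - start)
    return (
        numpy.concatenate((rest_images, images[:new]), axis=0),
        numpy.concatenate((rest_labels, labels[:new]), axis=0),
        new,
    )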
@deprecated(A_ , 'Please write your own downloading logic.' )
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )-> List[str]:
if not gfile.Exists(A_ ):
gfile.MakeDirs(A_ )
__UpperCAmelCase = os.path.join(A_ , A_ )
if not gfile.Exists(A_ ):
urllib.request.urlretrieve(A_ , A_ ) # noqa: S310
with gfile.GFile(A_ ) as f:
__UpperCAmelCase = f.size()
print('Successfully downloaded' , A_ , A_ , 'bytes.' )
return filepath
@deprecated(
A_ , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=dtypes.floataa , _lowerCAmelCase=True , _lowerCAmelCase=50_00 , _lowerCAmelCase=None , _lowerCAmelCase=DEFAULT_SOURCE_URL , )-> Optional[int]:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=A_ , one_hot=A_ , dtype=A_ , seed=A_ )
__UpperCAmelCase = fake()
__UpperCAmelCase = fake()
__UpperCAmelCase = fake()
return _Datasets(train=A_ , validation=A_ , test=A_ )
if not source_url: # empty string check
__UpperCAmelCase = DEFAULT_SOURCE_URL
__UpperCAmelCase = 'train-images-idx3-ubyte.gz'
__UpperCAmelCase = 'train-labels-idx1-ubyte.gz'
__UpperCAmelCase = 't10k-images-idx3-ubyte.gz'
__UpperCAmelCase = 't10k-labels-idx1-ubyte.gz'
__UpperCAmelCase = _maybe_download(
A_ , A_ , source_url + train_images_file )
with gfile.Open(A_ , 'rb' ) as f:
__UpperCAmelCase = _extract_images(A_ )
__UpperCAmelCase = _maybe_download(
A_ , A_ , source_url + train_labels_file )
with gfile.Open(A_ , 'rb' ) as f:
__UpperCAmelCase = _extract_labels(A_ , one_hot=A_ )
__UpperCAmelCase = _maybe_download(
A_ , A_ , source_url + test_images_file )
with gfile.Open(A_ , 'rb' ) as f:
__UpperCAmelCase = _extract_images(A_ )
__UpperCAmelCase = _maybe_download(
A_ , A_ , source_url + test_labels_file )
with gfile.Open(A_ , 'rb' ) as f:
__UpperCAmelCase = _extract_labels(A_ , one_hot=A_ )
if not 0 <= validation_size <= len(A_ ):
__UpperCAmelCase = (
'Validation size should be between 0 and '
F'{len(A_ )}. Received: {validation_size}.'
)
raise ValueError(A_ )
__UpperCAmelCase = train_images[:validation_size]
__UpperCAmelCase = train_labels[:validation_size]
__UpperCAmelCase = train_images[validation_size:]
__UpperCAmelCase = train_labels[validation_size:]
__UpperCAmelCase = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
__UpperCAmelCase = _DataSet(A_ , A_ , **A_ )
__UpperCAmelCase = _DataSet(A_ , A_ , **A_ )
__UpperCAmelCase = _DataSet(A_ , A_ , **A_ )
return _Datasets(train=A_ , validation=A_ , test=A_ )
| 126 | '''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case : str = logging.getLogger(__name__)
def lowerCamelCase__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=A_ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=A_ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=A_ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=A_ , default="data/dump" , help="The dump file prefix." )
UpperCAmelCase_ = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
UpperCAmelCase_ = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase_ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase_ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase_ = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCAmelCase_ = fp.readlines()
logger.info("Start encoding" )
logger.info(F"""{len(A_ )} examples to process.""" )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
UpperCAmelCase_ = 10_000
UpperCAmelCase_ = time.time()
for text in data:
UpperCAmelCase_ = F"""{bos} {text.strip()} {sep}"""
UpperCAmelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
rslt.append(A_ )
iter += 1
if iter % interval == 0:
UpperCAmelCase_ = time.time()
logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
UpperCAmelCase_ = time.time()
logger.info("Finished binarization" )
logger.info(F"""{len(A_ )} examples processed.""" )
UpperCAmelCase_ = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
UpperCAmelCase_ = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCAmelCase_ = [np.uintaa(A_ ) for d in rslt]
else:
UpperCAmelCase_ = [np.intaa(A_ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(A_ , "wb" ) as handle:
pickle.dump(rslt_ , A_ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 660 | 0 |
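# NOTE (editorial sketch): the binarization script above stores each encoded
# line with the narrowest integer dtype that can hold every token id: uint16
# when vocab_size < 2**16, int32 otherwise. This roughly halves the pickle
# size for BERT/GPT-2-sized vocabularies. The dtype choice in isolation
# (hypothetical names):
import numpy as np

def pack_token_ids(sequences, vocab_size):
    """Pack lists of token ids with the smallest dtype the vocab allows."""
    dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
    return [np.array(ids, dtype=dtype) for ids in sequences]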
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _A :
lowercase_ : List[Any] = 42
# setable values
lowercase_ : Dict = 42
lowercase_ : Tuple = 42
lowercase_ : str = None
@classmethod
def a ( cls : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ):
"""simple docstring"""
return cls(common=UpperCamelCase__ , init_noise_sigma=UpperCamelCase__ , timesteps=UpperCamelCase__ )
@dataclass
class _A ( _A ):
lowercase_ : List[Any] = 42
class _A ( _A , _A ):
lowercase_ : Dict = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase_ : List[str] = 42
@property
def a ( self : List[Any] ):
"""simple docstring"""
return True
@register_to_config
def __init__( self : List[Any] , lowerCamelCase__ : Union[str, Any] = 10_00 , lowerCamelCase__ : List[Any] = 0.0001 , lowerCamelCase__ : List[str] = 0.02 , lowerCamelCase__ : str = "linear" , lowerCamelCase__ : str = None , lowerCamelCase__ : Dict = "fixed_small" , lowerCamelCase__ : List[str] = True , lowerCamelCase__ : List[str] = "epsilon" , lowerCamelCase__ : Optional[Any] = jnp.floataa , ):
"""simple docstring"""
__UpperCamelCase : List[str] = dtype
def a ( self : Optional[int] , lowerCamelCase__ : List[str] = None ):
"""simple docstring"""
if common is None:
__UpperCamelCase : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__UpperCamelCase : Dict = jnp.array(1.0 , dtype=self.dtype )
__UpperCamelCase : Union[str, Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=UpperCamelCase__ , init_noise_sigma=UpperCamelCase__ , timesteps=UpperCamelCase__ , )
def a ( self : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any = None ):
"""simple docstring"""
return sample
def a ( self : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str = () ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__UpperCamelCase : List[str] = (jnp.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=UpperCamelCase__ , timesteps=UpperCamelCase__ , )
def a ( self : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Union[str, Any]=None ):
"""simple docstring"""
__UpperCamelCase : int = state.common.alphas_cumprod[t]
__UpperCamelCase : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__UpperCamelCase : Optional[int] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__UpperCamelCase : Optional[int] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__UpperCamelCase : str = jnp.clip(UpperCamelCase__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__UpperCamelCase : Optional[Any] = jnp.log(jnp.clip(UpperCamelCase__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
__UpperCamelCase : Optional[int] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__UpperCamelCase : Optional[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__UpperCamelCase : Dict = variance
__UpperCamelCase : Optional[Any] = state.common.betas[t]
__UpperCamelCase : Dict = (predicted_variance + 1) / 2
__UpperCamelCase : Any = frac * max_log + (1 - frac) * min_log
return variance
def a ( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple = None , lowerCamelCase__ : Optional[Any] = True , ):
"""simple docstring"""
__UpperCamelCase : Tuple = timestep
if key is None:
__UpperCamelCase : Dict = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__UpperCamelCase , __UpperCamelCase : Tuple = jnp.split(UpperCamelCase__ , sample.shape[1] , axis=1 )
else:
__UpperCamelCase : Optional[int] = None
# 1. compute alphas, betas
__UpperCamelCase : Any = state.common.alphas_cumprod[t]
__UpperCamelCase : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__UpperCamelCase : Tuple = 1 - alpha_prod_t
__UpperCamelCase : int = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__UpperCamelCase : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__UpperCamelCase : Any = model_output
elif self.config.prediction_type == "v_prediction":
__UpperCamelCase : Optional[int] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
'or `v_prediction` for the FlaxDDPMScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__UpperCamelCase : Tuple = jnp.clip(UpperCamelCase__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__UpperCamelCase : Optional[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__UpperCamelCase : Any = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__UpperCamelCase : str = jax.random.split(UpperCamelCase__ , num=1 )
__UpperCamelCase : Optional[Any] = jax.random.normal(UpperCamelCase__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(UpperCamelCase__ , UpperCamelCase__ , predicted_variance=UpperCamelCase__ ) ** 0.5) * noise
__UpperCamelCase : Union[str, Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__UpperCamelCase : List[str] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=UpperCamelCase__ , state=UpperCamelCase__ )
def a ( self : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , ):
"""simple docstring"""
return add_noise_common(state.common , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def a ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] , ):
"""simple docstring"""
return get_velocity_common(state.common , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __len__( self : Union[str, Any] ):
"""simple docstring"""
return self.config.num_train_timesteps
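# NOTE (editorial recap): the `step` method above implements the DDPM
# posterior q(x_{t-1} | x_t, x_0). Writing a_t = alphas_cumprod[t] and
# a_prev = alphas_cumprod[t-1]:
#   x0_hat = (x_t - sqrt(1 - a_t) * eps) / sqrt(a_t)                 eq. (15)
#   mean   = sqrt(a_prev) * beta_t / (1 - a_t) * x0_hat
#          + sqrt(alphas[t]) * (1 - a_prev) / (1 - a_t) * x_t        eq. (7)
#   var    = (1 - a_prev) / (1 - a_t) * beta_t                       eq. (7)
# and x_{t-1} = mean + sqrt(var) * z for t > 0. A minimal sketch for the
# epsilon-prediction case (hypothetical names; scalars or arrays both work):
def _ddpm_step_sketch(x_t, eps, a_t, a_prev, beta_t, z):
    x0_hat = (x_t - (1 - a_t) ** 0.5 * eps) / a_t ** 0.5
    alpha_t = a_t / a_prev  # per-step alpha recovered from the cumulative products
    mean = (a_prev ** 0.5 * beta_t / (1 - a_t)) * x0_hat + (alpha_t ** 0.5 * (1 - a_prev) / (1 - a_t)) * x_t
    var = (1 - a_prev) / (1 - a_t) * beta_t
    return mean + var ** 0.5 * z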
| 269 | '''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__snake_case : Dict = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__snake_case : str = json.load(f)
@require_torch
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Dict:
"""simple docstring"""
return FSMTTokenizer.from_pretrained(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = FSMTForConditionalGeneration.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = F"""facebook/wmt19-{pair}"""
UpperCAmelCase_ = self.get_tokenizer(UpperCamelCase__ )
UpperCAmelCase_ = self.get_model(UpperCamelCase__ )
UpperCAmelCase_ = bleu_data[pair]["src"]
UpperCAmelCase_ = bleu_data[pair]["tgt"]
UpperCAmelCase_ = tokenizer(UpperCamelCase__ , return_tensors="pt" , truncation=UpperCamelCase__ , padding="longest" ).to(UpperCamelCase__ )
UpperCAmelCase_ = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase_ = tokenizer.batch_decode(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
UpperCAmelCase_ = calculate_bleu(UpperCamelCase__ , UpperCamelCase__ )
print(UpperCamelCase__ )
self.assertGreaterEqual(scores["bleu"] , UpperCamelCase__ )
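# NOTE (editorial sketch): the regression test above is the standard
# tokenize -> generate -> batch_decode -> score pipeline, condensed here for
# one of the checkpoints it exercises (calculate_bleu comes from the local
# utils module):
#
#   tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")
#   model = FSMTForConditionalGeneration.from_pretrained("facebook/wmt19-en-ru")
#   batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest")
#   outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
#   hypotheses = tokenizer.batch_decode(outputs, skip_special_tokens=True)
#   print(calculate_bleu(hypotheses, tgt_sentences)["bleu"])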
| 660 | 0 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase__ ( _A ):
'''simple docstring'''
A_ : Optional[int] = (EulerDiscreteScheduler,)
A_ : Union[str, Any] = 10
def UpperCAmelCase_ ( self , **__snake_case ):
_SCREAMING_SNAKE_CASE : str = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**UpperCamelCase__ )
return config
def UpperCAmelCase_ ( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def UpperCAmelCase_ ( self ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCamelCase__ , beta_end=UpperCamelCase__ )
def UpperCAmelCase_ ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCamelCase__ )
def UpperCAmelCase_ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
_SCREAMING_SNAKE_CASE : Any = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
_SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Any = self.dummy_model()
_SCREAMING_SNAKE_CASE : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
_SCREAMING_SNAKE_CASE : List[Any] = sample.to(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_SCREAMING_SNAKE_CASE : int = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(UpperCamelCase__ , UpperCamelCase__ )
_SCREAMING_SNAKE_CASE : Any = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
_SCREAMING_SNAKE_CASE : List[Any] = output.prev_sample
_SCREAMING_SNAKE_CASE : Optional[int] = torch.sum(torch.abs(UpperCamelCase__ ) )
_SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
_SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Any = self.dummy_model()
_SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
_SCREAMING_SNAKE_CASE : Union[str, Any] = sample.to(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
_SCREAMING_SNAKE_CASE : str = model(UpperCamelCase__ , UpperCamelCase__ )
_SCREAMING_SNAKE_CASE : Optional[int] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
_SCREAMING_SNAKE_CASE : int = output.prev_sample
_SCREAMING_SNAKE_CASE : Optional[int] = torch.sum(torch.abs(UpperCamelCase__ ) )
_SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE : str = self.get_scheduler_config()
_SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase__ )
_SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Dict = self.dummy_model()
_SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_SCREAMING_SNAKE_CASE : Union[str, Any] = sample.to(UpperCamelCase__ )
for t in scheduler.timesteps:
_SCREAMING_SNAKE_CASE : Tuple = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(UpperCamelCase__ , UpperCamelCase__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
_SCREAMING_SNAKE_CASE : str = output.prev_sample
_SCREAMING_SNAKE_CASE : Optional[int] = torch.sum(torch.abs(UpperCamelCase__ ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
_SCREAMING_SNAKE_CASE : Dict = scheduler_class(**UpperCamelCase__ , use_karras_sigmas=UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase__ )
_SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : int = self.dummy_model()
_SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_SCREAMING_SNAKE_CASE : Any = sample.to(UpperCamelCase__ )
for t in scheduler.timesteps:
_SCREAMING_SNAKE_CASE : List[str] = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
_SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCamelCase__ , UpperCamelCase__ )
_SCREAMING_SNAKE_CASE : Dict = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample
_SCREAMING_SNAKE_CASE : int = torch.sum(torch.abs(UpperCamelCase__ ) )
_SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
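# NOTE (editorial sketch): every test above exercises the same sigma-indexed
# denoising loop; stripped of the bookkeeping it is (model, noise, generator
# stand in for the dummy objects used in the tests):
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample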
| 533 | '''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__snake_case : List[Any] = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : Optional[int] = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : int = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : Dict = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__snake_case : Tuple = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__snake_case : str = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def lowerCamelCase__ ( A_ ):
if isinstance(A_ , A_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ , A_=False ):
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def lowerCamelCase__ ( A_ , A_ , A_ , A_ , A_=None ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.bias"""]
UpperCAmelCase_ = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = (
checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
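# NOTE (editorial sketch): in the original checkpoints the attention
# projections are 1x1 convolutions over a fused qkv tensor, so the converter
# above (a) chunks the fused weight/bias into thirds along dim 0 and
# (b) squeezes away the two trailing singleton spatial dims, turning Conv1x1
# parameters into Linear ones. In isolation (hypothetical names):
def _split_qkv_sketch(qkv_weight, qkv_bias):
    """Split fused qkv conv parameters into q/k/v linear parameters."""
    w_q, w_k, w_v = qkv_weight.chunk(3, dim=0)  # [3C, C, 1, 1] -> three [C, C, 1, 1]
    b_q, b_k, b_v = qkv_bias.chunk(3, dim=0)
    def squeeze_spatial(t):  # [C, C, 1, 1] -> [C, C]; no-op on 1-D biases
        return t.squeeze(-1).squeeze(-1)
    return [squeeze_spatial(t) for t in (w_q, w_k, w_v, b_q, b_k, b_v)]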
def lowerCamelCase__ ( A_ , A_ ):
UpperCAmelCase_ = torch.load(A_ , map_location="cpu" )
UpperCAmelCase_ = {}
UpperCAmelCase_ = checkpoint["time_embed.0.weight"]
UpperCAmelCase_ = checkpoint["time_embed.0.bias"]
UpperCAmelCase_ = checkpoint["time_embed.2.weight"]
UpperCAmelCase_ = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
UpperCAmelCase_ = checkpoint["label_emb.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.bias"]
UpperCAmelCase_ = unet_config["down_block_types"]
UpperCAmelCase_ = unet_config["layers_per_block"]
UpperCAmelCase_ = unet_config["attention_head_dim"]
UpperCAmelCase_ = unet_config["block_out_channels"]
UpperCAmelCase_ = 1
UpperCAmelCase_ = channels_list[0]
for i, layer_type in enumerate(A_ ):
UpperCAmelCase_ = channels_list[i]
UpperCAmelCase_ = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(A_ ):
UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(A_ ):
UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
UpperCAmelCase_ = F"""down_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
A_ , A_ , A_ , A_ , A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""down_blocks.{i}.downsamplers.0"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
current_layer += 1
UpperCAmelCase_ = current_channels
# hardcoded the mid-block for now
UpperCAmelCase_ = "mid_block.resnets.0"
UpperCAmelCase_ = "middle_block.0"
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = "mid_block.attentions.0"
UpperCAmelCase_ = "middle_block.1"
UpperCAmelCase_ = convert_attention(A_ , A_ , A_ , A_ , A_ )
UpperCAmelCase_ = "mid_block.resnets.1"
UpperCAmelCase_ = "middle_block.2"
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = 0
UpperCAmelCase_ = unet_config["up_block_types"]
for i, layer_type in enumerate(A_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.1"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
UpperCAmelCase_ = F"""up_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
A_ , A_ , A_ , A_ , A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.2"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = checkpoint["out.0.weight"]
UpperCAmelCase_ = checkpoint["out.0.bias"]
UpperCAmelCase_ = checkpoint["out.2.weight"]
UpperCAmelCase_ = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
__snake_case : List[str] = parser.parse_args()
__snake_case : Any = strabool(args.class_cond)
__snake_case : List[str] = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
__snake_case : Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__snake_case : List[str] = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
__snake_case : Optional[Any] = None
__snake_case : Optional[int] = con_pt_to_diffuser(args.unet_path, unet_config)
__snake_case : str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__snake_case : Tuple = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__snake_case : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
__snake_case : Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config)
__snake_case : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 660 | 0 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
# General docstring
__SCREAMING_SNAKE_CASE ='''RegNetConfig'''
# Base docstring
__SCREAMING_SNAKE_CASE ='''facebook/regnet-y-040'''
__SCREAMING_SNAKE_CASE =[1, 1_088, 7, 7]
# Image classification docstring
__SCREAMING_SNAKE_CASE ='''facebook/regnet-y-040'''
__SCREAMING_SNAKE_CASE ='''tabby, tabby cat'''
__SCREAMING_SNAKE_CASE =[
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ ( tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self: Tuple , _lowerCamelCase: Any , _lowerCamelCase: int = 3 , _lowerCamelCase: Any = 1 , _lowerCamelCase: Any = 1 , _lowerCamelCase: Tuple = "relu" , **_lowerCamelCase: Optional[int] , ):
super().__init__(**UpperCamelCase__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
SCREAMING_SNAKE_CASE_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.ConvaD(
filters=UpperCamelCase__ , kernel_size=UpperCamelCase__ , strides=UpperCamelCase__ , padding='''VALID''' , groups=UpperCamelCase__ , use_bias=UpperCamelCase__ , name='''convolution''' , )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
SCREAMING_SNAKE_CASE_ = ACTaFN[activation] if activation is not None else tf.identity
def _A ( self: Dict , _lowerCamelCase: Any ):
SCREAMING_SNAKE_CASE_ = self.convolution(self.padding(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE_ = self.normalization(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = self.activation(UpperCamelCase__ )
return hidden_state
class __magic_name__ ( tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self: Any , _lowerCamelCase: str , **_lowerCamelCase: Union[str, Any] ):
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = config.num_channels
SCREAMING_SNAKE_CASE_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def _A ( self: Dict , _lowerCamelCase: Any ):
SCREAMING_SNAKE_CASE_ = shape_list(UpperCamelCase__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
SCREAMING_SNAKE_CASE_ = tf.transpose(UpperCamelCase__ , perm=(0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ = self.embedder(UpperCamelCase__ )
return hidden_state
class __magic_name__ ( tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self: Optional[Any] , _lowerCamelCase: Optional[int] , _lowerCamelCase: int = 2 , **_lowerCamelCase: Any ):
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.ConvaD(
filters=UpperCamelCase__ , kernel_size=1 , strides=UpperCamelCase__ , use_bias=UpperCamelCase__ , name='''convolution''' )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
def _A ( self: int , _lowerCamelCase: Dict , _lowerCamelCase: Optional[Any] = False ):
return self.normalization(self.convolution(UpperCamelCase__ ) , training=UpperCamelCase__ )
class __magic_name__ ( tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self: List[Any] , _lowerCamelCase: str , _lowerCamelCase: Any , **_lowerCamelCase: Dict ):
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCamelCase__ , name='''pooler''' )
SCREAMING_SNAKE_CASE_ = [
tf.keras.layers.ConvaD(filters=UpperCamelCase__ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=UpperCamelCase__ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def _A ( self: Tuple , _lowerCamelCase: List[str] ):
SCREAMING_SNAKE_CASE_ = self.pooler(UpperCamelCase__ )
for layer_module in self.attention:
SCREAMING_SNAKE_CASE_ = layer_module(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = hidden_state * pooled
return hidden_state
class __magic_name__ ( tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self: int , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Dict , _lowerCamelCase: Dict , _lowerCamelCase: Tuple = 1 , **_lowerCamelCase: Dict ):
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE_ = max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE_ = (
TFRegNetShortCut(UpperCamelCase__ , stride=UpperCamelCase__ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
SCREAMING_SNAKE_CASE_ = [
TFRegNetConvLayer(UpperCamelCase__ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
UpperCamelCase__ , stride=UpperCamelCase__ , groups=UpperCamelCase__ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(UpperCamelCase__ , kernel_size=1 , activation=UpperCamelCase__ , name='''layer.2''' ),
]
SCREAMING_SNAKE_CASE_ = ACTaFN[config.hidden_act]
def _A ( self: Optional[int] , _lowerCamelCase: int ):
SCREAMING_SNAKE_CASE_ = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ = layer_module(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = self.shortcut(UpperCamelCase__ )
hidden_state += residual
SCREAMING_SNAKE_CASE_ = self.activation(UpperCamelCase__ )
return hidden_state
class __magic_name__ ( tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self: List[str] , _lowerCamelCase: Any , _lowerCamelCase: Dict , _lowerCamelCase: Any , _lowerCamelCase: Optional[Any] = 1 , **_lowerCamelCase: int ):
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE_ = max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE_ = (
TFRegNetShortCut(UpperCamelCase__ , stride=UpperCamelCase__ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
SCREAMING_SNAKE_CASE_ = [
TFRegNetConvLayer(UpperCamelCase__ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
UpperCamelCase__ , stride=UpperCamelCase__ , groups=UpperCamelCase__ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(UpperCamelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(UpperCamelCase__ , kernel_size=1 , activation=UpperCamelCase__ , name='''layer.3''' ),
]
SCREAMING_SNAKE_CASE_ = ACTaFN[config.hidden_act]
def _A ( self: List[str] , _lowerCamelCase: Optional[int] ):
SCREAMING_SNAKE_CASE_ = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ = layer_module(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = self.shortcut(UpperCamelCase__ )
hidden_state += residual
SCREAMING_SNAKE_CASE_ = self.activation(UpperCamelCase__ )
return hidden_state
class __magic_name__ ( tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self: List[str] , _lowerCamelCase: Optional[Any] , _lowerCamelCase: str , _lowerCamelCase: Tuple , _lowerCamelCase: Optional[Any] = 2 , _lowerCamelCase: List[str] = 2 , **_lowerCamelCase: List[str] ):
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
SCREAMING_SNAKE_CASE_ = [
# downsampling is done in the first layer with stride of 2
layer(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , stride=UpperCamelCase__ , name='''layers.0''' ),
*[layer(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , name=f"layers.{i+1}" ) for i in range(depth - 1 )],
]
def _A ( self: List[str] , _lowerCamelCase: List[str] ):
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ = layer_module(UpperCamelCase__ )
return hidden_state
class __magic_name__ ( tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self: Optional[Any] , _lowerCamelCase: str , **_lowerCamelCase: Tuple ):
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
UpperCamelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
SCREAMING_SNAKE_CASE_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCamelCase__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , depth=UpperCamelCase__ , name=f"stages.{i+1}" ) )
def _A ( self: Optional[Any] , _lowerCamelCase: Any , _lowerCamelCase: Any = False , _lowerCamelCase: Optional[int] = True ):
SCREAMING_SNAKE_CASE_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE_ = hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE_ = stage_module(UpperCamelCase__ )
if output_hidden_states:
SCREAMING_SNAKE_CASE_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCamelCase__ , hidden_states=UpperCamelCase__ )
@keras_serializable
class __magic_name__ ( tf.keras.layers.Layer):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = RegNetConfig
def __init__( self: Union[str, Any] , _lowerCamelCase: Optional[Any] , **_lowerCamelCase: str ):
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = config
SCREAMING_SNAKE_CASE_ = TFRegNetEmbeddings(UpperCamelCase__ , name='''embedder''' )
SCREAMING_SNAKE_CASE_ = TFRegNetEncoder(UpperCamelCase__ , name='''encoder''' )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCamelCase__ , name='''pooler''' )
@unpack_inputs
def _A ( self: Union[str, Any] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Optional[int] = None , _lowerCamelCase: Optional[Any] = None , _lowerCamelCase: Optional[int] = False , ):
SCREAMING_SNAKE_CASE_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.embedder(UpperCamelCase__ , training=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = self.encoder(
UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , training=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = encoder_outputs[0]
SCREAMING_SNAKE_CASE_ = self.pooler(UpperCamelCase__ )
# Change to NCHW output format to have uniformity across the modules
SCREAMING_SNAKE_CASE_ = tf.transpose(UpperCamelCase__ , perm=(0, 3, 1, 2) )
SCREAMING_SNAKE_CASE_ = tf.transpose(UpperCamelCase__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
SCREAMING_SNAKE_CASE_ = tuple([tf.transpose(UpperCamelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase__ , pooler_output=UpperCamelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class __magic_name__ ( _A):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = RegNetConfig
SCREAMING_SNAKE_CASE__ : Optional[int] = "regnet"
SCREAMING_SNAKE_CASE__ : int = "pixel_values"
@property
def _A ( self: str ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
__SCREAMING_SNAKE_CASE =R'''
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matters related to general usage and
    behavior.
    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
__SCREAMING_SNAKE_CASE =R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , _A , )
class __magic_name__ ( _A):
'''simple docstring'''
def __init__( self: Optional[Any] , _lowerCamelCase: Optional[Any] , *_lowerCamelCase: Optional[Any] , **_lowerCamelCase: Optional[Any] ):
super().__init__(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = TFRegNetMainLayer(UpperCamelCase__ , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _A ( self: Union[str, Any] , _lowerCamelCase: Optional[int] , _lowerCamelCase: Optional[int] = None , _lowerCamelCase: List[str] = None , _lowerCamelCase: Optional[Any]=False , ):
SCREAMING_SNAKE_CASE_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.regnet(
pixel_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , training=UpperCamelCase__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _A , )
class __magic_name__ ( _A , _A):
'''simple docstring'''
def __init__( self: Tuple , _lowerCamelCase: Union[str, Any] , *_lowerCamelCase: Tuple , **_lowerCamelCase: Tuple ):
super().__init__(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = config.num_labels
SCREAMING_SNAKE_CASE_ = TFRegNetMainLayer(UpperCamelCase__ , name='''regnet''' )
# classification head
SCREAMING_SNAKE_CASE_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _A ( self: Tuple , _lowerCamelCase: int = None , _lowerCamelCase: Union[str, Any] = None , _lowerCamelCase: Optional[int] = None , _lowerCamelCase: Tuple = None , _lowerCamelCase: Optional[int]=False , ):
SCREAMING_SNAKE_CASE_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.regnet(
UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , training=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE_ = self.classifier[0](UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = self.classifier[1](UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = None if labels is None else self.hf_compute_loss(labels=UpperCamelCase__ , logits=UpperCamelCase__ )
if not return_dict:
SCREAMING_SNAKE_CASE_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=UpperCamelCase__ , logits=UpperCamelCase__ , hidden_states=outputs.hidden_states )
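# NOTE (editorial recap): as the comments above note, tf.keras.layers.Conv2D
# cannot run in NCHW format on CPU, while the Hugging Face vision API exposes
# NCHW tensors; hence the paired transposes in the embedder and main layer.
# The two permutations are exact inverses:
#
#   nhwc = tf.transpose(nchw, perm=(0, 2, 3, 1))  # NCHW -> NHWC before the convs
#   nchw = tf.transpose(nhwc, perm=(0, 3, 1, 2))  # NHWC -> NCHW before returning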
| 234 | '''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
__snake_case : Any = _symbol_database.Default()
__snake_case : Dict = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
__snake_case : Union[str, Any] = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
__snake_case : Any = None
__snake_case : Dict = B'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
__snake_case : Union[str, Any] = 45
__snake_case : str = 15_81
__snake_case : Optional[int] = 15_17
__snake_case : Optional[Any] = 15_70
__snake_case : Union[str, Any] = 15_84
__snake_case : Any = 17_93
__snake_case : Optional[int] = 17_95
__snake_case : Tuple = 19_16
__snake_case : int = 18_64
__snake_case : Any = 19_05
__snake_case : Optional[int] = 19_19
__snake_case : str = 24_29
__snake_case : Tuple = 22_08
__snake_case : str = 24_18
__snake_case : Tuple = 23_23
__snake_case : Optional[int] = 24_07
# @@protoc_insertion_point(module_scope)
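# NOTE (editorial sketch): the module above only registers the serialized
# FileDescriptor; message classes such as ModelProto are generated when the
# module is imported. Typical use, assuming the module is importable as
# sentencepiece_model_pb2 and spm_path points at a trained .model file:
#
#   m = sentencepiece_model_pb2.ModelProto()
#   with open(spm_path, "rb") as fh:
#       m.ParseFromString(fh.read())
#   print(m.trainer_spec.vocab_size, len(m.pieces))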
| 660 | 0 |
from collections import deque
from math import floor
from random import random
from time import time
# the default weight is 1 if not assigned but all the implementation is weighted
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if you leave it or pass -1
    # the count will be random from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has at most 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
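
# The undirected variant below mirrors DirectedGraph but stores every edge in both
# adjacency lists, so the same stack-based dfs/bfs/cycle helpers apply unchanged.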
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if you leave it or pass -1
    # the count will be random from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has at most 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
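
# Minimal usage sketch (node labels are arbitrary hashables; outputs depend on insertion order):
#   dg = DirectedGraph()
#   dg.add_pair(0, 1); dg.add_pair(1, 2); dg.add_pair(2, 0)
#   dg.dfs(0, 2)      # expected: [0, 1, 2]
#   dg.has_cycle()    # expected: True for the 0 -> 1 -> 2 -> 0 loop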
| 406 | '''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1 for every box)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
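    # The tuple above feeds every create_and_check_* helper below in the same order:
    # (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels)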
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, labels=sequence_labels, training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, labels=token_labels, training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
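
# 000000039769.png is the two-cats COCO sample used across transformers integration tests.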
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 77 | '''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
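
# Example invocation (paths are placeholders):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir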
| 660 | 0 |
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
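
# The parametrized test below walks the same matrix through the generic `Extractor`
# front end, which first has to infer the archive format before extracting.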
@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
| 682 | '''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
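
# Usage sketch (assumes KEYMAP contains an "up" entry; illustrative only):
#   @register
#   class Menu:
#       @mark(KEYMAP["up"])
#       def move_up(cls):
#           ...
#   Menu.handle_input() then dispatches a pressed key to the marked method.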
| 660 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # An XLNet sequence has the format: `X <sep> <cls>` or `A <sep> B <sep> <cls>`
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""")

        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
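
# Quick sanity sketch (ids are illustrative, not from a real vocab):
#   tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#   tok.build_inputs_with_special_tokens([10, 11])  # expected: [10, 11, sep_id, cls_id]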
| 363 | '''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average,
        )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
@unittest.skip(reason="Model has no tokens embeddings" )
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
UpperCAmelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "d_model" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "num_attention_heads" , UpperCamelCase__ )
UpperCAmelCase_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
attentions = outputs.encoder_attentions
self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
out_len = len(outputs )
correct_outlen = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(out_len , correct_outlen )
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions , (list, tuple) )
self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions , (list, tuple) )
self.assertEqual(len(cross_attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
self.assertEqual(out_len + 2 , len(outputs ) )
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
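# Aside (illustrative numbers, not taken from the config): the shape
# checks in this test reduce to simple arithmetic — each attention map
# ends in (num_attention_heads, seq_len, dim), with
# dim = d_model // num_attention_heads.
_d_model, _num_heads, _enc_len = 16, 2, 10
_dim = _d_model // _num_heads  # 8
assert [_num_heads, _enc_len, _dim] == [2, 10, 8]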
@is_flaky()
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def lowerCamelCase__ ( A_="train-batch.pt" ):
UpperCAmelCase_ = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=A_ , repo_type="dataset" )
UpperCAmelCase_ = torch.load(A_ , map_location=A_ )
return batch
@require_torch
@slow
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
batch = prepare_batch()
with torch.no_grad():
output = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
expected_shape = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=torch_device )
self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=1e-4 ) )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
batch = prepare_batch("val-batch.pt" )
with torch.no_grad():
output = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
expected_shape = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=torch_device )
self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=1e-4 ) )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
batch = prepare_batch("val-batch.pt" )
with torch.no_grad():
outputs = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , expected_shape )
expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=torch_device )
mean_prediction = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1e-1 ) )
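# Aside (assumed shapes, standalone sketch): `generate` returns
# `sequences` of shape (batch, num_parallel_samples, prediction_length);
# averaging over the sample dimension yields the point forecast that the
# assertion above compares against.
_sequences = torch.randn(64, 100, 24)      # (batch, samples, horizon)
_mean_prediction = _sequences.mean(dim=1)  # -> (64, 24)
assert _mean_prediction.shape == (64, 24)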
| 660 | 0 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_globals["_TRAINERSPEC"]._serialized_start = 45
_globals["_TRAINERSPEC"]._serialized_end = 1581
_globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
_globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
_globals["_NORMALIZERSPEC"]._serialized_start = 1584
_globals["_NORMALIZERSPEC"]._serialized_end = 1793
_globals["_SELFTESTDATA"]._serialized_start = 1795
_globals["_SELFTESTDATA"]._serialized_end = 1916
_globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
_globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
_globals["_MODELPROTO"]._serialized_start = 1919
_globals["_MODELPROTO"]._serialized_end = 2429
_globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
_globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
_globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
_globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope) | 613 | '''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
__snake_case : Tuple = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
__snake_case : Dict = {
'''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
with open(emoji_file , "r" , encoding="utf-8" ) as f:
emoji = json.loads(f.read() )
vocab = collections.OrderedDict()
raw_vocab = collections.OrderedDict()
ids_to_tokens = collections.OrderedDict()
with open(vocab_file , "r" , encoding="utf-8" ) as f:
token = f.readlines()
token = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(token ):
ids_to_tokens[idx] = b
raw_vocab[",".join(b )] = idx
for wd in b:
vocab[wd] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
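# Illustration (hypothetical file contents) of the parsing above: a vocab
# line is either a single token or a comma-separated group of surface
# forms that all share one id.
_lines = ["こんにちは\n", "こんにちわ,こんにちは\n"]
_token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in _lines]
assert _token == [["こんにちは"], ["こんにちわ", "こんにちは"]]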
class lowercase_ ( _A ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ) -> int:
"""simple docstring"""
super().__init__(
unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
if not os.path.isfile(vocab_file ):
raise ValueError(
F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(emoji_file ):
raise ValueError(
F"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
self.do_clean_text = do_clean_text
self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
self.subword_tokenizer = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return len(self.raw_vocab )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(UpperCamelCase__ , clean=self.do_clean_text )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> int:
"""simple docstring"""
return self.vocab.get(UpperCamelCase__ , self.vocab.get(self.unk_token ) )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(UpperCamelCase__ )
def convert_tokens_to_string( self , tokens ) -> str:
"""simple docstring"""
out_string = "".join(tokens ).strip()
return out_string
def _build_conversation_input_ids( self , conversation ) -> List[int]:
"""simple docstring"""
input_ids = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
if len(input_ids ) > self.model_max_length:
input_ids = input_ids[-self.model_max_length :]
return input_ids
def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
"""simple docstring"""
index = 0
if os.path.isdir(save_directory ):
vocab_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
emoji_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
vocab_file = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
emoji_file = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(vocab_file , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
index = token_index
writer.write(",".join(token ) + "\n" )
index += 1
with open(emoji_file , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , writer )
return vocab_file, emoji_file
class lowercase_ ( _A ):
def __init__( self , vocab , ids_to_tokens , emoji ) -> Optional[int]:
"""simple docstring"""
self.vocab = vocab # same as swe
self.ids_to_tokens = ids_to_tokens # same as bpe
self.emoji = emoji
self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
self.content_repatter1 = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
self.content_repatter2 = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
self.content_repatter3 = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
self.content_repatter4 = re.compile(
R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
self.content_repatter5 = re.compile(
R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
self.content_repatter6 = re.compile(
R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ) -> int:
"""simple docstring"""
return len(self.ids_to_tokens )
def clean_text( self , content ) -> str:
"""simple docstring"""
content = self.content_repatter1.sub("<URL>" , content )
content = self.content_repatter2.sub("<EMAIL>" , content )
content = self.content_repatter3.sub("<TEL>" , content )
content = self.content_repatter4.sub("<DATE>" , content )
content = self.content_repatter5.sub("<DATE>" , content )
content = self.content_repatter6.sub("<PRICE>" , content )
content = content.translate(self.content_trans1 )
while "<BLOCK><BLOCK>" in content:
content = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
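# Usage sketch for the masking idea above, with simpler stand-in regexes
# (the real patterns are the ones compiled in __init__; these are not):
_url_re = re.compile(r"https?://\S+")
_email_re = re.compile(r"\S+@\S+\.\S+")
_sample = "mail foo@bar.com or see https://example.com"
assert _url_re.sub("<URL>", _email_re.sub("<EMAIL>", _sample)) == "mail <EMAIL> or see <URL>"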
def tokenize( self , text , clean=False ) -> List[Any]:
"""simple docstring"""
text = text.replace(" " , "<SP>" )
text = text.replace("　" , "<SP>" )
text = text.replace("\r\n" , "<BR>" )
text = text.replace("\n" , "<BR>" )
text = text.replace("\r" , "<BR>" )
text = text.replace("\t" , "<TAB>" )
text = text.replace("—" , "ー" )
text = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
text = text.replace(k , v )
if clean:
text = self.clean_text(text )
def check_simbol(x ):
e = x.encode()
if len(x ) == 1 and len(e ) == 2:
c = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xC2A1 and c <= 0xC2BF)
or (c >= 0xC780 and c <= 0xC783)
or (c >= 0xCAB9 and c <= 0xCBBF)
or (c >= 0xCC80 and c <= 0xCDA2)
):
return True
return False
def checku2e(x ):
e = x.encode()
if len(x ) == 1 and len(e ) == 3:
c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xE28080 and c <= 0xE2B07F:
return True
return False
pos = 0
result = []
while pos < len(text ):
end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
candidates = [] # (token_id, token, pos)
for e in range(end , pos , -1 ):
wd = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(wd ) > 2:
candidates = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(candidates ) > 0:
# the smallest token_id is adopted
_ , wd , e = sorted(candidates , key=lambda x : x[0] )[0]
result.append(wd )
pos = e
else:
end = pos + 1
wd = text[pos:end]
if check_simbol(wd ):
result.append("<KIGOU>" )
elif checku2e(wd ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
pos = end
return result
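# Miniature of the matching rule implemented in `tokenize` above: try the
# longest span first, and among vocabulary hits keep the smallest id.
# Toy vocabulary, not the shipped one.
_toy_vocab = {"a": 9, "ab": 5, "abc": 3}
_cands = [(_toy_vocab[w], w, len(w)) for w in ("a", "ab", "abc") if w in _toy_vocab]
_best_id, _best_wd, _best_end = sorted(_cands, key=lambda x: x[0])[0]
assert (_best_id, _best_wd) == (3, "abc")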
def convert_id_to_token( self , index , breakline="\n" ) -> Optional[Any]:
"""simple docstring"""
words = []
byte_tokens = []
word = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(byte_tokens ) > 0:
words.append(bytearray(byte_tokens ).decode("utf-8" , errors="replace" ) )
byte_tokens = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(breakline )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(word )
if len(byte_tokens ) > 0:
words.append(bytearray(byte_tokens ).decode("utf-8" , errors="replace" ) )
text = "".join(words )
return text
| 660 | 0 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __a :
'''simple docstring'''
def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ) -> Union[str, Any]:
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.mask_ratio = mask_ratio
self.scope = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
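# Worked example of the sequence-length formula above (numbers match the
# tester defaults, shown only for illustration): image_size=30 and
# patch_size=2 give (30 // 2) ** 2 = 225 patches, and with mask_ratio=0.6
# the visible length is ceil(0.4 * (225 + 1)) = 91.
assert int(math.ceil((1 - 0.6) * ((30 // 2) ** 2 + 1))) == 91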
def prepare_config_and_inputs( self ) -> Union[str, Any]:
"""simple docstring"""
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ) -> Optional[int]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def create_and_check_model( self , config , pixel_values , labels ) -> Any:
"""simple docstring"""
model = ViTMAEModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_pretraining( self , config , pixel_values , labels ) -> Optional[int]:
"""simple docstring"""
model = ViTMAEForPreTraining(config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
num_patches = (self.image_size // self.patch_size) ** 2
expected_num_channels = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
config.num_channels = 1
model = ViTMAEForPreTraining(config )
model.to(torch_device )
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
result = model(pixel_values )
expected_num_channels = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def prepare_config_and_inputs_for_common( self ) -> Optional[int]:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __a (_A , _A , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :int = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE :str = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
_SCREAMING_SNAKE_CASE :Optional[int] = False
_SCREAMING_SNAKE_CASE :Optional[Any] = False
_SCREAMING_SNAKE_CASE :Any = False
_SCREAMING_SNAKE_CASE :List[str] = False
def _a ( self ) -> Tuple:
"""simple docstring"""
self.model_tester = ViTMAEModelTester(self )
self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
def _a ( self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def _a ( self ) -> Tuple:
"""simple docstring"""
pass
def _a ( self ) -> Any:
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def _a ( self ) -> Dict:
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def _a ( self ) -> str:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def _a ( self ) -> Any:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
def check_pt_tf_models( self , tf_model , pt_model , pt_inputs_dict ) -> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
pt_noise = torch.from_numpy(noise )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
pt_inputs_dict["noise"] = pt_noise
super().check_pt_tf_models(tf_model , pt_model , pt_inputs_dict )
def _a ( self ) -> int:
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
model.to(torch_device )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
out_2 = outputs[0].cpu().numpy()
out_2[np.isnan(out_2 )] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname )
model = model_class.from_pretrained(tmpdirname )
model.to(torch_device )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
after_outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
# Make sure we don't have nans
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1 )] = 0
max_diff = np.amax(np.abs(out_1 - out_2 ) )
self.assertLessEqual(max_diff , 1E-5 )
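# The tolerance check above in miniature: two float arrays count as equal
# when their largest absolute elementwise difference is tiny (values
# below are illustrative).
_a = np.array([1.0, 2.0, 3.0])
_b = _a + 1e-6
assert np.amax(np.abs(_a - _b)) <= 1E-5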
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.""" )
def _a ( self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.""" )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.""" )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Any:
"""simple docstring"""
pass
@slow
def _a ( self ) -> int:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ViTMAEModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def _lowercase ( ) -> Optional[int]:
image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __a (unittest.TestCase):
'''simple docstring'''
@cached_property
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def _a ( self ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
model = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
vit_mae_config = ViTMAEConfig()
num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
noise = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
outputs = model(**inputs , noise=torch.from_numpy(noise ).to(device=torch_device ) )
# verify the logits
expected_shape = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(torch_device ) , atol=1E-4 ) )
| 680 | '''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__snake_case : Union[str, Any] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def lowerCamelCase__ ( ):
g = Github(os.environ["GITHUB_TOKEN"] )
repo = g.get_repo("huggingface/diffusers" )
open_issues = repo.get_issues(state="open" )
for issue in open_issues:
comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
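# Timeline sketch of the thresholds used above (hypothetical dates): an
# issue created >= 30 days ago with > 23 days of inactivity gets the
# stale notice, and > 7 further days of silence after the bot comment
# closes it.
from datetime import timedelta

_now = dt.utcnow()
_created_at = _now - timedelta(days=40)
_updated_at = _now - timedelta(days=24)
assert (_now - _updated_at).days > 23 and (_now - _created_at).days >= 30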
| 660 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_layoutlmv3_fast"] = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_layoutlmv3"] = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_layoutlmv3"] = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_layoutlmv3"] = ['''LayoutLMv3FeatureExtractor''']
_import_structure["image_processing_layoutlmv3"] = ['''LayoutLMv3ImageProcessor''']
if TYPE_CHECKING:
from .configuration_layoutlmv3 import (
    LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
    LayoutLMv3Config,
    LayoutLMv3OnnxConfig,
)
from .processing_layoutlmv3 import LayoutLMv3Processor
from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmv3 import (
    LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
    LayoutLMv3ForQuestionAnswering,
    LayoutLMv3ForSequenceClassification,
    LayoutLMv3ForTokenClassification,
    LayoutLMv3Model,
    LayoutLMv3PreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmv3 import (
    TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
    TFLayoutLMv3ForQuestionAnswering,
    TFLayoutLMv3ForSequenceClassification,
    TFLayoutLMv3ForTokenClassification,
    TFLayoutLMv3Model,
    TFLayoutLMv3PreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 470 | '''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__snake_case : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowercase_ ( datasets.BuilderConfig ):
a_ = 1_0000
a_ = None
a_ = None
class lowercase_ ( datasets.ArrowBasedBuilder ):
a_ = ParquetConfig
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
data_files = dl_manager.download_and_extract(self.config.data_files )
if isinstance(data_files , (str, list, tuple) ):
files = data_files
if isinstance(files , str ):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
splits = []
for split_name, files in data_files.items():
if isinstance(files , str ):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files ):
with open(file , "rb" ) as f:
self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
break
splits.append(datasets.SplitGenerator(name=UpperCamelCase__ , gen_kwargs={"files": files} ) )
return splits
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table , self.info.features.arrow_schema )
return pa_table
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
schema = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase__ ) ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = pq.ParquetFile(UpperCamelCase__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
pa_table = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(UpperCamelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCamelCase__ )}: {e}""" )
raise
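# Standalone sketch of the batched read performed by the generator above
# ("example.parquet" is a placeholder path, not a real fixture): each
# record batch is wrapped into its own pa.Table before being cast.
def _read_parquet_in_batches(path="example.parquet", batch_size=10_000):
    parquet_file = pq.ParquetFile(path)
    for record_batch in parquet_file.iter_batches(batch_size=batch_size):
        yield pa.Table.from_batches([record_batch])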
| 660 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['''small''', '''medium''', '''large''']
OLD_KEY = '''lm_head.decoder.weight'''
NEW_KEY = '''lm_head.weight'''
def convert_dialogpt_checkpoint( checkpoint_path , pytorch_dump_folder_path )-> Tuple:
d = torch.load(checkpoint_path )
d[NEW_KEY] = d.pop(OLD_KEY )
os.makedirs(pytorch_dump_folder_path , exist_ok=True )
torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
if __name__ == "__main__":
_A: int = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
_A: str = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
checkpoint_path = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
pytorch_dump_folder_path = F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 126 | '''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''spiece.model'''}
__snake_case : Dict = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
__snake_case : Tuple = {
'''AI-Sweden/gpt-sw3-126m''': 2048,
'''AI-Sweden/gpt-sw3-350m''': 2048,
'''AI-Sweden/gpt-sw3-1.6b''': 2048,
'''AI-Sweden/gpt-sw3-6.7b''': 2048,
'''AI-Sweden/gpt-sw3-20b''': 2048,
}
class lowercase_ ( _A ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs = None , **kwargs , ) -> None:
"""simple docstring"""
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
name_or_path = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b;"
" if you are testing the model, this can safely be ignored" )
name_or_path = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
eos_token = "<|endoftext|>" if eos_token is None else eos_token
unk_token = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
pad_token = unk_token if pad_token is None else pad_token
bos_token = eos_token if bos_token is None else bos_token
else:
pad_token = "<pad>" if pad_token is None else pad_token
bos_token = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(vocab_file )
# Used for whitespace normalization in input texts
# fmt: off
UpperCAmelCase_ = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
self.non_printing_characters_re = re.compile(
F"""[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__( self ) -> Optional[int]:
"""simple docstring"""
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
self.__dict__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return len(self.sp_model )
def preprocess_text( self , text ) -> str:
"""simple docstring"""
text = self.non_printing_characters_re.sub("" , text )
# Normalize whitespaces
text = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
text = unicodedata.normalize("NFC" , text )
return text
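# NFC normalization in isolation: a base letter plus combining accent is
# composed into one code point, so both spellings compare equal afterwards.
assert unicodedata.normalize("NFC" , "e\u0301" ) == "\u00e9"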
def _tokenize( self , text , **kwargs ) -> List[str]:
"""simple docstring"""
text = self.preprocess_text(text )
return self.sp_model.encode(text , out_type=str )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCamelCase__ )
@staticmethod
def clean_up_tokenization( out_string ) -> str:
"""simple docstring"""
return out_string
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
current_sub_tokens = []
out_string = ""
prev_is_special = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens ) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token )
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens )
return out_string
def lowerCamelCase_ ( self ) -> Dict[str, int]:
"""simple docstring"""
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(save_directory ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , "wb" ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
def encode_fast( self , text , return_tensors = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
"""simple docstring"""
if isinstance(text , str ):
text = self.preprocess_text(text )
token_ids = self.sp_model.encode(text )
else:
text = [self.preprocess_text(t ) for t in text]
token_ids = self.sp_model.encode(text )
if return_tensors is True or return_tensors == "pt":
token_ids = torch.tensor(token_ids )
return token_ids
def decode_fast( self , token_ids ) -> str:
"""simple docstring"""
return self.sp_model.decode(token_ids )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
UpperCAmelCase_ = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(UpperCamelCase__ ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=UpperCamelCase__ )
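# Shape of the prompt assembled above, with toy strings (token values
# here are illustrative; the real ones come from the tokenizer config):
_eos, _bos = "<|endoftext|>", "<s>"
_turns = ["User: Hi", "Bot: Hello"]
_prompt = F"""{_eos}{_bos}""" + F"""{_bos}""".join(_turns) + F"""{_bos}Bot:"""
assert _prompt == "<|endoftext|><s>User: Hi<s>Bot: Hello<s>Bot:"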
| 660 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_biogpt"] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 269 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowercase_ ( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
"""simple docstring"""
size = size if size is not None else {"shortest_edge": 18}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def lowerCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase_ ( _A , unittest.TestCase ):
a_ = LevitImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
self.image_processor_tester = LevitImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 660 | 0 |
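A minimal usage sketch (not part of the original test file) of the image processor these tests exercise, assuming the public transformers LevitImageProcessor API; the size and crop values mirror the test defaults above:

import numpy as np
from PIL import Image
from transformers import LevitImageProcessor

# Hedged sketch: configuration values copied from the test defaults, not a canonical config.
processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
image = Image.fromarray(np.random.randint(0, 256, (40, 40, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18]): resize shortest edge, then center crop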
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def lowerCAmelCase_( lowercase_ : Namespace ) -> Union[str, Any]:
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
__SCREAMING_SNAKE_CASE : Any = '''
transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class lowerCamelCase_( A__ ):
'''simple docstring'''
@staticmethod
def snake_case__ ( lowerCamelCase__ ):
_lowerCamelCase = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=lowerCamelCase__ , required=lowerCamelCase__ , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=lowerCamelCase__ , required=lowerCamelCase__ , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=lowerCamelCase__ , required=lowerCamelCase__ , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=lowerCamelCase__ , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=lowerCamelCase__ , default=lowerCamelCase__ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ , ):
_lowerCamelCase = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(F"""Loading model {model_type}""" )
_lowerCamelCase = model_type
_lowerCamelCase = tf_checkpoint
_lowerCamelCase = pytorch_dump_output
_lowerCamelCase = config
_lowerCamelCase = finetuning_task_name
def snake_case__ ( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
if "ckpt" in self._tf_checkpoint.lower():
_lowerCamelCase = self._tf_checkpoint
_lowerCamelCase = ''''''
else:
_lowerCamelCase = self._tf_checkpoint
_lowerCamelCase = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
lowerCamelCase__ , self._config , self._pytorch_dump_output , lowerCamelCase__ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]''' )
| 661 |
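The command above is normally reached through the transformers-cli convert entry point. A hedged sketch of driving it programmatically; the factory and run method carry obfuscated names in this listing (upstream they are convert_command_factory and ConvertCommand.run), so the actual calls are left commented:

from argparse import Namespace

# Hypothetical arguments mirroring the parser flags defined above.
args = Namespace(
    model_type="bert",
    tf_checkpoint="./bert_model.ckpt",
    pytorch_dump_output="./pytorch_model.bin",
    config="./bert_config.json",
    finetuning_task_name=None,
)
# command = convert_command_factory(args)  # upstream factory name; obfuscated above
# command.run()                            # performs the TF -> PyTorch conversion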
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCamelCase_( A__, A__, A__ ):
'''simple docstring'''
lowercase__ : List[Any] = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = 5_0_2_5_7 , lowerCamelCase__ = 1_0_2_4 , lowerCamelCase__ = 7_6_8 , lowerCamelCase__ = 1_2 , lowerCamelCase__ = 1_2 , lowerCamelCase__ = None , lowerCamelCase__ = "gelu_new" , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 1e-5 , lowerCamelCase__ = 0.0_2 , lowerCamelCase__ = True , lowerCamelCase__ = True , lowerCamelCase__ = False , lowerCamelCase__ = False , ):
super().__init__()
_lowerCamelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
_lowerCamelCase = prefix_inner_dim
_lowerCamelCase = prefix_hidden_dim
_lowerCamelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_lowerCamelCase = (
nn.Linear(self.prefix_hidden_dim , lowerCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_lowerCamelCase = GPTaConfig(
vocab_size=lowerCamelCase__ , n_positions=lowerCamelCase__ , n_embd=lowerCamelCase__ , n_layer=lowerCamelCase__ , n_head=lowerCamelCase__ , n_inner=lowerCamelCase__ , activation_function=lowerCamelCase__ , resid_pdrop=lowerCamelCase__ , embd_pdrop=lowerCamelCase__ , attn_pdrop=lowerCamelCase__ , layer_norm_epsilon=lowerCamelCase__ , initializer_range=lowerCamelCase__ , scale_attn_weights=lowerCamelCase__ , use_cache=lowerCamelCase__ , scale_attn_by_inverse_layer_idx=lowerCamelCase__ , reorder_and_upcast_attn=lowerCamelCase__ , )
_lowerCamelCase = GPTaLMHeadModel(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , ):
_lowerCamelCase = self.transformer.transformer.wte(lowerCamelCase__ )
_lowerCamelCase = self.encode_prefix(lowerCamelCase__ )
_lowerCamelCase = self.decode_prefix(lowerCamelCase__ )
_lowerCamelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_lowerCamelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_lowerCamelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_lowerCamelCase = self.transformer(inputs_embeds=lowerCamelCase__ , labels=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
        return torch.zeros(lowerCamelCase__ , self.prefix_length , dtype=torch.int64 , device=lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
return self.encode_prefix(lowerCamelCase__ )
@torch.no_grad()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = torch.split(lowerCamelCase__ , 1 , dim=0 )
_lowerCamelCase = []
_lowerCamelCase = []
for feature in features:
_lowerCamelCase = self.decode_prefix(feature.to(lowerCamelCase__ ) ) # back to the clip feature
# Only support beam search for now
_lowerCamelCase , _lowerCamelCase = self.generate_beam(
input_embeds=lowerCamelCase__ , device=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_lowerCamelCase = torch.stack(lowerCamelCase__ )
_lowerCamelCase = torch.stack(lowerCamelCase__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def snake_case__ ( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__ = 5 , lowerCamelCase__ = 6_7 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = None , ):
_lowerCamelCase = eos_token_id
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = torch.ones(lowerCamelCase__ , device=lowerCamelCase__ , dtype=torch.int )
_lowerCamelCase = torch.zeros(lowerCamelCase__ , device=lowerCamelCase__ , dtype=torch.bool )
if input_embeds is not None:
_lowerCamelCase = input_embeds
else:
_lowerCamelCase = self.transformer.transformer.wte(lowerCamelCase__ )
for i in range(lowerCamelCase__ ):
_lowerCamelCase = self.transformer(inputs_embeds=lowerCamelCase__ )
_lowerCamelCase = outputs.logits
_lowerCamelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_lowerCamelCase = logits.softmax(-1 ).log()
if scores is None:
_lowerCamelCase , _lowerCamelCase = logits.topk(lowerCamelCase__ , -1 )
_lowerCamelCase = generated.expand(lowerCamelCase__ , *generated.shape[1:] )
_lowerCamelCase , _lowerCamelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_lowerCamelCase = next_tokens
else:
_lowerCamelCase = tokens.expand(lowerCamelCase__ , *tokens.shape[1:] )
_lowerCamelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_lowerCamelCase = -float(np.inf )
_lowerCamelCase = 0
_lowerCamelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_lowerCamelCase = scores_sum / seq_lengths[:, None]
_lowerCamelCase , _lowerCamelCase = scores_sum_average.view(-1 ).topk(lowerCamelCase__ , -1 )
_lowerCamelCase = next_tokens // scores_sum.shape[1]
_lowerCamelCase = seq_lengths[next_tokens_source]
_lowerCamelCase = next_tokens % scores_sum.shape[1]
_lowerCamelCase = next_tokens.unsqueeze(1 )
_lowerCamelCase = tokens[next_tokens_source]
_lowerCamelCase = torch.cat((tokens, next_tokens) , dim=1 )
_lowerCamelCase = generated[next_tokens_source]
_lowerCamelCase = scores_sum_average * seq_lengths
_lowerCamelCase = is_stopped[next_tokens_source]
_lowerCamelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_lowerCamelCase = torch.cat((generated, next_token_embed) , dim=1 )
_lowerCamelCase = is_stopped + next_tokens.eq(lowerCamelCase__ ).squeeze()
if is_stopped.all():
break
_lowerCamelCase = scores / seq_lengths
_lowerCamelCase = scores.argsort(descending=lowerCamelCase__ )
# tokens tensors are already padded to max_seq_length
_lowerCamelCase = [tokens[i] for i in order]
_lowerCamelCase = torch.stack(lowerCamelCase__ , dim=0 )
_lowerCamelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 661 | 1 |
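The beam search above ranks candidates by length-normalized log-probability: scores / seq_lengths followed by a descending argsort. A standalone sketch of that bookkeeping with hypothetical numbers:

import torch

# Hypothetical cumulative log-probs and lengths for three beams.
scores = torch.tensor([-2.1, -3.0, -3.2])
seq_lengths = torch.tensor([3.0, 5.0, 4.0])
normalized = scores / seq_lengths            # per-token average log-prob: [-0.7, -0.6, -0.8]
order = normalized.argsort(descending=True)  # best beam first
print(order)  # tensor([1, 0, 2])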
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
lowercase__ : Any = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase__ : Tuple = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowercase__ : Any = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowercase__ : Any = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' )
_lowerCamelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCamelCase__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
_lowerCamelCase = text_classifier('''This is great !''' , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}] )
_lowerCamelCase = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
] , )
_lowerCamelCase = text_classifier('''This is great !''' , top_k=1 )
self.assertEqual(nested_simplify(lowerCamelCase__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
# Legacy behavior
_lowerCamelCase = text_classifier('''This is great !''' , return_all_scores=lowerCamelCase__ )
self.assertEqual(nested_simplify(lowerCamelCase__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
_lowerCamelCase = text_classifier('''This is great !''' , return_all_scores=lowerCamelCase__ )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , [[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}]] )
_lowerCamelCase = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=lowerCamelCase__ )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
] , )
_lowerCamelCase = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=lowerCamelCase__ )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , [
{'''label''': '''LABEL_0''', '''score''': 0.5_0_4},
{'''label''': '''LABEL_0''', '''score''': 0.5_0_4},
] , )
@require_torch
def snake_case__ ( self ):
import torch
_lowerCamelCase = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , )
_lowerCamelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCamelCase__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
@require_tf
def snake_case__ ( self ):
_lowerCamelCase = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' )
_lowerCamelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCamelCase__ ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
@slow
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = pipeline('''text-classification''' )
_lowerCamelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCamelCase__ ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
_lowerCamelCase = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(lowerCamelCase__ ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
_lowerCamelCase = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(lowerCamelCase__ ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_8_8}] )
@slow
@require_tf
def snake_case__ ( self ):
_lowerCamelCase = pipeline('''text-classification''' , framework='''tf''' )
_lowerCamelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCamelCase__ ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
_lowerCamelCase = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(lowerCamelCase__ ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
_lowerCamelCase = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(lowerCamelCase__ ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_8_8}] )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TextClassificationPipeline(model=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
_lowerCamelCase = '''HuggingFace is in'''
_lowerCamelCase = text_classifier(lowerCamelCase__ )
self.assertEqual(nested_simplify(lowerCamelCase__ ) , [{'''label''': ANY(lowerCamelCase__ ), '''score''': ANY(lowerCamelCase__ )}] )
        self.assertTrue(outputs[0]['''label'''] in model.config.id2label.values() )
_lowerCamelCase = ['''HuggingFace is in ''', '''Paris is in France''']
_lowerCamelCase = text_classifier(lowerCamelCase__ )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , [{'''label''': ANY(lowerCamelCase__ ), '''score''': ANY(lowerCamelCase__ )}, {'''label''': ANY(lowerCamelCase__ ), '''score''': ANY(lowerCamelCase__ )}] , )
        self.assertTrue(outputs[0]['''label'''] in model.config.id2label.values() )
        self.assertTrue(outputs[1]['''label'''] in model.config.id2label.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
_lowerCamelCase = text_classifier(lowerCamelCase__ , top_k=lowerCamelCase__ )
        _lowerCamelCase = len(model.config.id2label.values() )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , [[{'''label''': ANY(lowerCamelCase__ ), '''score''': ANY(lowerCamelCase__ )}] * N, [{'''label''': ANY(lowerCamelCase__ ), '''score''': ANY(lowerCamelCase__ )}] * N] , )
_lowerCamelCase = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
_lowerCamelCase = text_classifier(lowerCamelCase__ )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , {'''label''': ANY(lowerCamelCase__ ), '''score''': ANY(lowerCamelCase__ )} , )
        self.assertTrue(outputs['''label'''] in model.config.id2label.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
_lowerCamelCase = [['''HuggingFace is in ''', '''Paris is in France''']]
with self.assertRaises(lowerCamelCase__ ):
text_classifier(lowerCamelCase__ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
_lowerCamelCase = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , [{'''label''': ANY(lowerCamelCase__ ), '''score''': ANY(lowerCamelCase__ )}] , )
        self.assertTrue(outputs[0]['''label'''] in model.config.id2label.values() )
| 661 |
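A hedged sketch of the pipeline API these tests exercise, using the same tiny test checkpoint; the scores asserted above (e.g. 0.504) are specific to that randomly initialized model:

from transformers import pipeline

classifier = pipeline(task="text-classification", model="hf-internal-testing/tiny-random-distilbert")
print(classifier("This is great !"))           # [{'label': 'LABEL_0', 'score': ...}]
print(classifier("This is great !", top_k=2))  # every label, highest score first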
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__SCREAMING_SNAKE_CASE : Optional[int] = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
    pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__SCREAMING_SNAKE_CASE : List[str] = '''\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def lowerCAmelCase_( lowercase_ : Union[str, Any] ) -> str:
def remove_articles(lowercase_ : int ):
_lowerCamelCase = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(lowercase_ , ''' ''' , lowercase_ )
def white_space_fix(lowercase_ : List[Any] ):
return " ".join(text.split() )
def remove_punc(lowercase_ : Dict ):
_lowerCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowercase_ : Union[str, Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowercase_ ) ) ) )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[Any] ) -> Union[str, Any]:
return int(normalize_answer(lowercase_ ) == normalize_answer(lowercase_ ) )
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Tuple ) -> Tuple:
_lowerCamelCase = [any(compute_exact(lowercase_ , lowercase_ ) for ref in refs ) for pred, refs in zip(lowercase_ , lowercase_ )]
return (sum(lowercase_ ) / len(lowercase_ )) * 1_00
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : str ) -> Optional[int]:
_lowerCamelCase = [rgram for rgrams in rgramslist for rgram in rgrams]
_lowerCamelCase = Counter(lowercase_ )
_lowerCamelCase = Counter(lowercase_ )
_lowerCamelCase = Counter()
for sgram, scount in sgramcounter.items():
_lowerCamelCase = scount * numref
_lowerCamelCase = Counter(lowercase_ )
_lowerCamelCase = Counter()
for cgram, ccount in cgramcounter.items():
_lowerCamelCase = ccount * numref
# KEEP
_lowerCamelCase = sgramcounter_rep & cgramcounter_rep
_lowerCamelCase = keepgramcounter_rep & rgramcounter
_lowerCamelCase = sgramcounter_rep & rgramcounter
_lowerCamelCase = 0
_lowerCamelCase = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCamelCase = 1
_lowerCamelCase = 1
if len(lowercase_ ) > 0:
_lowerCamelCase = keeptmpscorea / len(lowercase_ )
if len(lowercase_ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_lowerCamelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_lowerCamelCase = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_lowerCamelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_lowerCamelCase = sgramcounter_rep - cgramcounter_rep
_lowerCamelCase = delgramcounter_rep - rgramcounter
_lowerCamelCase = sgramcounter_rep - rgramcounter
_lowerCamelCase = 0
_lowerCamelCase = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCamelCase = 1
if len(lowercase_ ) > 0:
_lowerCamelCase = deltmpscorea / len(lowercase_ )
# ADDITION
_lowerCamelCase = set(lowercase_ ) - set(lowercase_ )
_lowerCamelCase = set(lowercase_ ) & set(lowercase_ )
_lowerCamelCase = set(lowercase_ ) - set(lowercase_ )
_lowerCamelCase = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCamelCase = 1
_lowerCamelCase = 1
if len(lowercase_ ) > 0:
_lowerCamelCase = addtmpscore / len(lowercase_ )
if len(lowercase_ ) > 0:
_lowerCamelCase = addtmpscore / len(lowercase_ )
_lowerCamelCase = 0
if addscore_precision > 0 or addscore_recall > 0:
_lowerCamelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : str ) -> List[str]:
_lowerCamelCase = len(lowercase_ )
_lowerCamelCase = ssent.split(''' ''' )
_lowerCamelCase = csent.split(''' ''' )
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
for rsent in rsents:
_lowerCamelCase = rsent.split(''' ''' )
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
ragramslist.append(lowercase_ )
for i in range(0 , len(lowercase_ ) - 1 ):
if i < len(lowercase_ ) - 1:
_lowerCamelCase = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(lowercase_ )
if i < len(lowercase_ ) - 2:
_lowerCamelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(lowercase_ )
if i < len(lowercase_ ) - 3:
_lowerCamelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(lowercase_ )
ragramslist.append(lowercase_ )
ragramslist.append(lowercase_ )
ragramslist.append(lowercase_ )
for i in range(0 , len(lowercase_ ) - 1 ):
if i < len(lowercase_ ) - 1:
_lowerCamelCase = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(lowercase_ )
if i < len(lowercase_ ) - 2:
_lowerCamelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(lowercase_ )
if i < len(lowercase_ ) - 3:
_lowerCamelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(lowercase_ )
for i in range(0 , len(lowercase_ ) - 1 ):
if i < len(lowercase_ ) - 1:
_lowerCamelCase = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(lowercase_ )
if i < len(lowercase_ ) - 2:
_lowerCamelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(lowercase_ )
if i < len(lowercase_ ) - 3:
_lowerCamelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(lowercase_ )
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
_lowerCamelCase = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_lowerCamelCase = sum([delascore, delascore, delascore, delascore] ) / 4
_lowerCamelCase = sum([addascore, addascore, addascore, addascore] ) / 4
_lowerCamelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : bool = True , lowercase_ : str = "13a" , lowercase_ : bool = True ) -> int:
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
_lowerCamelCase = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_lowerCamelCase = sacrebleu.metrics.bleu._get_tokenizer(lowercase_ )()(lowercase_ )
else:
_lowerCamelCase = sacrebleu.TOKENIZERS[tokenizer]()(lowercase_ )
elif tokenizer == "moses":
_lowerCamelCase = sacremoses.MosesTokenizer().tokenize(lowercase_ , return_str=lowercase_ , escape=lowercase_ )
elif tokenizer == "penn":
_lowerCamelCase = sacremoses.MosesTokenizer().penn_tokenize(lowercase_ , return_str=lowercase_ )
else:
_lowerCamelCase = sentence
if not return_str:
_lowerCamelCase = normalized_sent.split()
return normalized_sent
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Any , lowercase_ : List[Any] ) -> Optional[int]:
if not (len(lowercase_ ) == len(lowercase_ ) == len(lowercase_ )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_lowerCamelCase = 0
for src, pred, refs in zip(lowercase_ , lowercase_ , lowercase_ ):
sari_score += SARIsent(normalize(lowercase_ ) , normalize(lowercase_ ) , [normalize(lowercase_ ) for sent in refs] )
_lowerCamelCase = sari_score / len(lowercase_ )
return 1_00 * sari_score
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Any , lowercase_ : List[Any]="exp" , lowercase_ : List[Any]=None , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=False , lowercase_ : List[Any]=False , ) -> Dict:
_lowerCamelCase = len(references[0] )
if any(len(lowercase_ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_lowerCamelCase = [[refs[i] for refs in references] for i in range(lowercase_ )]
_lowerCamelCase = sacrebleu.corpus_bleu(
lowercase_ , lowercase_ , smooth_method=lowercase_ , smooth_value=lowercase_ , force=lowercase_ , lowercase=lowercase_ , use_effective_order=lowercase_ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCamelCase_( datasets.Metric ):
'''simple docstring'''
def snake_case__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = {}
result.update({'''sari''': compute_sari(sources=lowerCamelCase__ , predictions=lowerCamelCase__ , references=lowerCamelCase__ )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowerCamelCase__ , references=lowerCamelCase__ )} )
result.update({'''exact''': compute_em(predictions=lowerCamelCase__ , references=lowerCamelCase__ )} )
return result
| 661 | 1 |
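The aggregation at the end of the sentence-level SARI function averages the keep (F1), deletion (precision only) and addition (F1) scores returned by SARIngram over 1- to 4-grams, then averages the three operations. A sketch of that arithmetic with hypothetical per-n-gram scores:

# Hypothetical per-n-gram scores; real values come from SARIngram above.
keep_f1 = [0.8, 0.7, 0.6, 0.5]    # keep uses F1
del_prec = [0.9, 0.8, 0.7, 0.6]   # deletion uses precision only
add_f1 = [0.3, 0.2, 0.1, 0.0]     # addition uses F1
sari = (sum(keep_f1) / 4 + sum(del_prec) / 4 + sum(add_f1) / 4) / 3
print(round(100 * sari, 2))  # 51.67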
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowerCAmelCase_( lowercase_ : Tuple ) -> int:
if "model" in orig_key:
_lowerCamelCase = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
_lowerCamelCase = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
_lowerCamelCase = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
_lowerCamelCase = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
_lowerCamelCase = orig_key.split('''.''' )[0].split('''_''' )[-1]
_lowerCamelCase = orig_key.replace(F"""transformer_{layer_num}""" , F"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
_lowerCamelCase = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
_lowerCamelCase = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
_lowerCamelCase = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
_lowerCamelCase = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
_lowerCamelCase = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
_lowerCamelCase = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
_lowerCamelCase = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
_lowerCamelCase = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
_lowerCamelCase = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
_lowerCamelCase = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
_lowerCamelCase = '''yoso.''' + orig_key
return orig_key
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : str ) -> List[Any]:
for key in orig_state_dict.copy().keys():
_lowerCamelCase = orig_state_dict.pop(lowercase_ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
_lowerCamelCase = val
_lowerCamelCase = orig_state_dict['''cls.predictions.decoder.bias''']
_lowerCamelCase = torch.arange(lowercase_ ).expand((1, -1) ) + 2
return orig_state_dict
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[int]:
_lowerCamelCase = torch.load(lowercase_ , map_location='''cpu''' )['''model_state_dict''']
_lowerCamelCase = YosoConfig.from_json_file(lowercase_ )
_lowerCamelCase = YosoForMaskedLM(lowercase_ )
_lowerCamelCase = convert_checkpoint_helper(config.max_position_embeddings , lowercase_ )
print(model.load_state_dict(lowercase_ ) )
model.eval()
model.save_pretrained(lowercase_ )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 661 |
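A hedged walk-through of the renaming chain above on a hypothetical checkpoint key (upstream calls the helper rename_key; the obfuscated function names in this listing collide, so the chain is replayed inline):

key = "model.transformer_0.mha.W_q.weight"             # hypothetical original key
key = key.replace("model.", "")                        # transformer_0.mha.W_q.weight
key = key.replace("transformer_0", "encoder.layer.0")  # encoder.layer.0.mha.W_q.weight
key = key.replace("mha", "attention")                  # encoder.layer.0.attention.W_q.weight
key = key.replace("W_q", "self.query")                 # encoder.layer.0.attention.self.query.weight
print("yoso." + key)  # yoso.encoder.layer.0.attention.self.query.weight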
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 661 | 1 |
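A brief sketch of what the _LazyModule registration above buys, assuming the upstream transformers package layout: importing the package is cheap, and the heavy framework-specific modules load only when an exported name is first accessed.

# Hedged sketch; assumes the upstream transformers package layout.
import transformers.models.wav2vec2 as wav2vec2  # fast: submodules are not imported yet
processor_cls = wav2vec2.Wav2Vec2Processor       # first attribute access triggers the real import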
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
__SCREAMING_SNAKE_CASE : Tuple = '''cpu'''
__SCREAMING_SNAKE_CASE : str = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
__SCREAMING_SNAKE_CASE : Optional[int] = '''path-to-your-trained-model'''
__SCREAMING_SNAKE_CASE : Any = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__SCREAMING_SNAKE_CASE : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(device)
# to channels last
__SCREAMING_SNAKE_CASE : Tuple = pipe.unet.to(memory_format=torch.channels_last)
__SCREAMING_SNAKE_CASE : Optional[int] = pipe.vae.to(memory_format=torch.channels_last)
__SCREAMING_SNAKE_CASE : Optional[int] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__SCREAMING_SNAKE_CASE : Any = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.randn(2, 4, 6_4, 6_4)
__SCREAMING_SNAKE_CASE : List[str] = torch.rand(1) * 9_9_9
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(2, 7_7, 7_6_8)
__SCREAMING_SNAKE_CASE : Union[str, Any] = (sample, timestep, encoder_hidden_status)
try:
    __SCREAMING_SNAKE_CASE : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    __SCREAMING_SNAKE_CASE : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
__SCREAMING_SNAKE_CASE : Union[str, Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
__SCREAMING_SNAKE_CASE : int = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    __SCREAMING_SNAKE_CASE : Dict = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
__SCREAMING_SNAKE_CASE : Any = 6_6_6
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device).manual_seed(seed)
__SCREAMING_SNAKE_CASE : List[Any] = {'''generator''': generator}
if args.steps is not None:
__SCREAMING_SNAKE_CASE : str = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 661 |
"""simple docstring"""
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = '''Morse code here!'''
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
    main()
| 661 | 1 |
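A short round-trip check for the encrypt and decrypt helpers defined above:

print(encrypt("SOS"))          # ... --- ...
print(decrypt("... --- ..."))  # SOS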
"""simple docstring"""
def hamming_distance(string1: str, string2: str) -> int:
    """Calculate the Hamming distance between two equal-length strings.
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError('''String lengths must match!''')
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__SCREAMING_SNAKE_CASE : List[Any] = True
except (ImportError, AttributeError):
__SCREAMING_SNAKE_CASE : List[Any] = object
def lowerCAmelCase_( *lowercase_ : Dict , **lowercase_ : str ) -> str:
pass
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Any = logging.get_logger('''transformers-cli/serving''')
def lowerCAmelCase_( lowercase_ : Namespace ) -> List[Any]:
_lowerCamelCase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(lowercase_ , args.host , args.port , args.workers )
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : dict
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : List[str]
lowercase__ : Optional[List[int]]
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : str
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Any
class lowerCamelCase_( A__ ):
'''simple docstring'''
@staticmethod
def snake_case__ ( lowerCamelCase__ ):
_lowerCamelCase = parser.add_parser(
'''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
serve_parser.add_argument(
'''--task''' , type=lowerCamelCase__ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
serve_parser.add_argument('''--host''' , type=lowerCamelCase__ , default='''localhost''' , help='''Interface the server will listen on.''' )
serve_parser.add_argument('''--port''' , type=lowerCamelCase__ , default=8_8_8_8 , help='''Port the serving will listen to.''' )
serve_parser.add_argument('''--workers''' , type=lowerCamelCase__ , default=1 , help='''Number of http workers''' )
serve_parser.add_argument('''--model''' , type=lowerCamelCase__ , help='''Model\'s name or path to stored model.''' )
serve_parser.add_argument('''--config''' , type=lowerCamelCase__ , help='''Model\'s config name or path to stored model.''' )
serve_parser.add_argument('''--tokenizer''' , type=lowerCamelCase__ , help='''Tokenizer name to use.''' )
serve_parser.add_argument(
'''--device''' , type=lowerCamelCase__ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
serve_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = pipeline
_lowerCamelCase = host
_lowerCamelCase = port
_lowerCamelCase = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install "transformers[serving]".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(F"""Serving model over {host}:{port}""" )
_lowerCamelCase = FastAPI(
routes=[
APIRoute(
'''/''' , self.model_info , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''GET'''] , ),
APIRoute(
'''/tokenize''' , self.tokenize , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''POST'''] , ),
APIRoute(
'''/detokenize''' , self.detokenize , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''POST'''] , ),
APIRoute(
'''/forward''' , self.forward , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''POST'''] , ),
] , timeout=6_0_0 , )
def snake_case__ ( self ):
run(self._app , host=self.host , port=self.port , workers=self.workers )
def snake_case__ ( self ):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def snake_case__ ( self , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) ):
try:
_lowerCamelCase = self._pipeline.tokenizer.tokenize(lowerCamelCase__ )
if return_ids:
_lowerCamelCase = self._pipeline.tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
return ServeTokenizeResult(tokens=lowerCamelCase__ , tokens_ids=lowerCamelCase__ )
else:
return ServeTokenizeResult(tokens=lowerCamelCase__ )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={'''model''': '''''', '''error''': str(lowerCamelCase__ )} )
def snake_case__ ( self , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , ):
try:
_lowerCamelCase = self._pipeline.tokenizer.decode(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return ServeDeTokenizeResult(model='''''' , text=lowerCamelCase__ )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={'''model''': '''''', '''error''': str(lowerCamelCase__ )} )
async def snake_case__ ( self , lowerCamelCase__=Body(lowerCamelCase__ , embed=lowerCamelCase__ ) ):
# Check we don't have empty string
if len(lowerCamelCase__ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
_lowerCamelCase = self._pipeline(lowerCamelCase__ )
return ServeForwardResult(output=lowerCamelCase__ )
except Exception as e:
raise HTTPException(5_0_0 , {'''error''': str(lowerCamelCase__ )} )
| 661 | 1 |
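A hedged client-side sketch for the /tokenize route registered above. The JSON field names follow the upstream transformers serving command and are assumptions here (the parameter names are obfuscated in this listing); the server must already be running, by default on port 8888:

import requests  # extra dependency, not used by the server code above

resp = requests.post(
    "http://localhost:8888/tokenize",
    json={"text_input": "Hello world", "return_ids": True},  # assumed field names
)
print(resp.json())  # e.g. {'tokens': [...], 'tokens_ids': [...]}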
"""simple docstring"""
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError('''Please enter a valid equation.''')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''')
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''')
        else:
            raise ValueError('''No solution. (Inconsistent system)''')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
| 661 |
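A worked example for the solver above: for x + 2y = 3 and 4x + 5y = 6 the determinants are -3, 3 and -6, giving x = -1 and y = 2.

print(cramers_rule_2x2([1, 2, 3], [4, 5, 6]))  # (-1.0, 2.0)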
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Dict ) -> List[Any]:
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str ) -> List[Any]:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCamelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : int ) -> Optional[int]:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCamelCase = features.copy() if features else default_expected_features
_lowerCamelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : List[Any] ) -> Tuple:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
_lowerCamelCase = features.copy() if features else default_expected_features
_lowerCamelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Optional[int] ) -> List[str]:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
_lowerCamelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
_lowerCamelCase = features.copy()
_lowerCamelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ) -> int:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCamelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ , split=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Tuple ) -> Optional[int]:
if issubclass(lowercase_ , lowercase_ ):
_lowerCamelCase = jsonl_path
elif issubclass(lowercase_ , lowercase_ ):
_lowerCamelCase = [jsonl_path]
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCamelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
def _check_json_datasetdict(dataset_dict , expected_features , splits=("train",) ):
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : int ) -> Dict:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCamelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_json_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] ) -> List[Any]:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCamelCase = features.copy() if features else default_expected_features
_lowerCamelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase = JsonDatasetReader({'''train''': jsonl_path} , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_json_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase_( lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : List[str] ) -> Optional[Any]:
if split:
_lowerCamelCase = {split: jsonl_path}
else:
_lowerCamelCase = '''train'''
_lowerCamelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCamelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_json_datasetdict(lowercase_ , lowercase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer ):
    return json.load(buffer )
def load_json_lines(buffer ):
    return [json.loads(line ) for line in buffer]
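# Note (added): with lines=True the writer emits JSON Lines (one object per
# line, parsed by load_json_lines above); with lines=False it emits a single
# JSON document whose layout follows the pandas-style ``orient`` argument
# exercised by the parametrized tests below.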
class lowerCamelCase_:
'''simple docstring'''
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ ).write()
buffer.seek(0 )
_lowerCamelCase = load_json_function(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert isinstance(exported_content[0] , lowerCamelCase__ )
assert len(lowerCamelCase__ ) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , orient=lowerCamelCase__ ).write()
buffer.seek(0 )
_lowerCamelCase = load_json(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase__ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(lowerCamelCase__ ) == 1_0
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
_lowerCamelCase = load_json_function(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert isinstance(exported_content[0] , lowerCamelCase__ )
assert len(lowerCamelCase__ ) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , orient=lowerCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
_lowerCamelCase = load_json(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase__ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(lowerCamelCase__ ) == 1_0
def snake_case__ ( self , lowerCamelCase__ ):
with pytest.raises(lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = tmp_path_factory.mktemp('''data''' ) / F"""test.json.{extension}"""
_lowerCamelCase = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , compression=lowerCamelCase__ ).write()
with fsspec.open(lowerCamelCase__ , '''rb''' , compression='''infer''' ) as f:
_lowerCamelCase = f.read()
with fsspec.open(lowerCamelCase__ , '''rb''' , compression='''infer''' ) as f:
_lowerCamelCase = f.read()
assert exported_content == original_content
| 661 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 661 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=3 , lowerCamelCase__=3_2 , lowerCamelCase__=3 , lowerCamelCase__=1_0 , lowerCamelCase__=[8, 1_6, 3_2, 6_4] , lowerCamelCase__=[1, 1, 2, 1] , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="relu" , lowerCamelCase__=3 , lowerCamelCase__=None , lowerCamelCase__=["stage2", "stage3", "stage4"] , lowerCamelCase__=[2, 3, 4] , lowerCamelCase__=1 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = num_channels
_lowerCamelCase = embeddings_size
_lowerCamelCase = hidden_sizes
_lowerCamelCase = depths
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_act
_lowerCamelCase = num_labels
_lowerCamelCase = scope
_lowerCamelCase = len(lowerCamelCase__ )
_lowerCamelCase = out_features
_lowerCamelCase = out_indices
_lowerCamelCase = num_groups
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = BitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = BitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = BitBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowerCamelCase = None
_lowerCamelCase = BitBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Dict = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowercase__ : Any = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Union[str, Any] = False
lowercase__ : List[Any] = False
lowercase__ : Any = False
lowercase__ : List[str] = False
lowercase__ : Any = False
def snake_case__ ( self ):
_lowerCamelCase = BitModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def snake_case__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(config=lowerCamelCase__ )
for name, module in model.named_modules():
if isinstance(lowerCamelCase__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def snake_case__ ( self ):
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCamelCase = layer_type
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def snake_case__ ( self ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = BitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> List[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def snake_case__ ( self ):
_lowerCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
_lowerCamelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
@require_torch
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (BitBackbone,) if is_torch_available() else ()
lowercase__ : Tuple = BitConfig
lowercase__ : Any = False
def snake_case__ ( self ):
_lowerCamelCase = BitModelTester(self )
| 661 | 1 |
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 661 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=2 , lowerCamelCase__=2_4 , lowerCamelCase__=1_6 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=None , lowerCamelCase__=2 , lowerCamelCase__=2 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = patch_size
_lowerCamelCase = max_length
_lowerCamelCase = num_mel_bins
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = scope
_lowerCamelCase = frequency_stride
_lowerCamelCase = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCamelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_lowerCamelCase = (self.max_length - self.patch_size) // self.time_stride + 1
_lowerCamelCase = frequency_out_dimension * time_out_dimension
_lowerCamelCase = num_patches + 2
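        # Worked example with this tester's defaults (num_mel_bins=16, patch_size=2,
        # frequency_stride=2, max_length=24, time_stride=2):
        # frequency_out_dimension = (16 - 2) // 2 + 1 = 8,
        # time_out_dimension = (24 - 2) // 2 + 1 = 12,
        # so num_patches = 8 * 12 = 96 and seq_length = 96 + 2 = 98.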
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, input_values, labels
def snake_case__ ( self ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = ASTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Any = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowercase__ : List[str] = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
lowercase__ : int = False
lowercase__ : str = False
lowercase__ : Union[str, Any] = False
lowercase__ : List[str] = False
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def snake_case__ ( self ):
_lowerCamelCase = ASTModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''input_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@slow
def snake_case__ ( self ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = ASTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> str:
_lowerCamelCase = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
_lowerCamelCase , _lowerCamelCase = torchaudio.load(lowercase_ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def snake_case__ ( self ):
_lowerCamelCase = self.default_feature_extractor
_lowerCamelCase = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(lowerCamelCase__ )
_lowerCamelCase = self.default_feature_extractor
_lowerCamelCase , _lowerCamelCase = prepare_audio()
_lowerCamelCase = audio.squeeze().numpy()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
_lowerCamelCase = torch.Size((1, 5_2_7) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 661 | 1 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('''T''')
class LRUCache( Generic[T] ):
'''simple docstring'''
    dq_store : deque[T] # Cache store of keys
    key_reference : set[T] # References of the keys in cache
    _MAX_CAPACITY : int = 10 # Maximum capacity of cache
    def __init__( self , n ):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''' )
        else:
            LRUCache._MAX_CAPACITY = n
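    # Descriptive note (added): refer() marks x as most-recently used. A new
    # key may first evict the least-recently used entry from the right end of
    # the deque; an existing key is removed and re-inserted at the front.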
    def refer( self , x ):
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self ):
        for k in self.dq_store:
            print(k )
def __repr__( self ):
return F"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 661 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    '''simple docstring'''
    def forward( self , input_ids , token_type_ids , attention_mask ):
        return None
class FuncNonContiguousArgs:
    '''simple docstring'''
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        return None
class OnnxExportTestCase( unittest.TestCase ):
    '''simple docstring'''
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def snake_case__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , '''tf''' , 1_2 , **model_kwargs )
@require_torch
@slow
def snake_case__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , '''pt''' , 1_2 , **model_kwargs )
@require_torch
@slow
def snake_case__ ( self ):
from transformers import BertModel
        vocab = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
        with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
            vocab_file.write('''\n'''.join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
                model.save_pretrained(bert_save_dir )
                self._test_export(bert_save_dir , '''pt''' , 1_2 , tokenizer )
@require_tf
@slow
def snake_case__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , '''tf''' , 1_2 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def snake_case__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , '''pt''' , 1_2 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
    def _test_export( self , model , framework , opset , tokenizer=None , **model_kwargs ):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath('''model.onnx''' )
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework , model , path , opset , tokenizer , **model_kwargs )
                return path
except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
def snake_case__ ( self ):
from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
        tokenizer = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
        self._test_infer_dynamic_axis(model , tokenizer , '''pt''' )
@require_tf
@require_tokenizers
@slow
def snake_case__ ( self ):
from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
        tokenizer = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
        self._test_infer_dynamic_axis(model , tokenizer , '''tf''' )
    def _test_infer_dynamic_axis( self , model , tokenizer , framework ):
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def snake_case__ ( self ):
        input_names = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
        tokens = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
        ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args ) , 1 )
        self.assertEqual(len(ordered_input_names ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def snake_case__ ( self ):
        generated = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
| 661 | 1 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome( n : int | str ) -> bool:
    n = str(n )
    return n == n[::-1]
def solution( limit : int = 1_00_00_00 ) -> int:
    total = 0
    for i in range(1 , limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split('''b''' )[1] ):
            total += i
return total
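# Worked example: 585 is a palindrome in base 10 and bin(585) == '0b1001001001',
# whose binary digits '1001001001' also read the same reversed, so 585 is
# counted by solution().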
if __name__ == "__main__":
    print(solution(int(input().strip())))
| 661 |
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(1_0)}
def digits_fifth_powers_sum( number : int ) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution() -> int:
    return sum(
        number
        for number in range(10_00 , 1_00_00_00 )
        if number == digits_fifth_powers_sum(number ) )
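# Worked example: digits_fifth_powers_sum(4150) == 4**5 + 1**5 + 5**5 + 0**5
# == 1024 + 1 + 3125 + 0 == 4150, so 4150 contributes to the total above.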
if __name__ == "__main__":
print(solution())
| 661 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 661 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
'''simple docstring'''
    def __init__( self , vertices , edges ):
        self.vertices = vertices
        self.edges = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge , weight ):
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
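    # Descriptive note (added): prims_algorithm() below grows a minimum
    # spanning tree from the smallest vertex, each round adding the lightest
    # edge with exactly one endpoint inside the tree (the XOR cut test).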
    def prims_algorithm( self ):
        subgraph = Graph({min(self.vertices )} , {} )
        min_edge : EdgeT
        min_weight : int
        edge : EdgeT
        weight : int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def lowerCAmelCase_( lowercase_ : str = "p107_network.txt" ) -> int:
_lowerCamelCase = os.path.abspath(os.path.dirname(lowercase_ ) )
_lowerCamelCase = os.path.join(lowercase_ , lowercase_ )
_lowerCamelCase = {}
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
with open(lowercase_ ) as f:
_lowerCamelCase = f.read().strip().split('''\n''' )
_lowerCamelCase = [line.split(''',''' ) for line in data]
for edgea in range(1 , len(lowercase_ ) ):
for edgea in range(lowercase_ ):
if adjaceny_matrix[edgea][edgea] != "-":
_lowerCamelCase = int(adjaceny_matrix[edgea][edgea] )
_lowerCamelCase = Graph(set(range(len(lowercase_ ) ) ) , lowercase_ )
_lowerCamelCase = graph.prims_algorithm()
_lowerCamelCase = sum(graph.edges.values() )
_lowerCamelCase = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 661 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__SCREAMING_SNAKE_CASE : List[str] = {
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = ['''ConvNextFeatureExtractor''']
__SCREAMING_SNAKE_CASE : List[str] = ['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 661 |
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution( target : int = 2_00_00_00 ) -> int:
    triangle_numbers = [0]
    idx : int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product = 0
    # the area corresponding to the grid that gives the product closest to target
    area = 0
    # an estimate of b, using the quadratic formula
    b_estimate : float
    # the largest integer less than b_estimate
    b_floor : int
    # the smallest integer greater than b_estimate
    b_ceil : int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess : int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess : int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
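# Derivation note (added): an a x b grid contains T(a) * T(b) rectangles,
# where T(n) = n * (n + 1) / 2 is the n-th triangle number. Setting
# T(b) = target / T(a) gives b**2 + b - 2 * target / T(a) = 0, and the
# quadratic formula yields the b_estimate computed in the loop above.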
if __name__ == "__main__":
print(F"""{solution() = }""")
| 661 | 1 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(A__ )
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , **lowerCamelCase__ ):
super().__init__(**lowerCamelCase__ )
requires_backends(self , '''vision''' )
requires_backends(self , '''torch''' )
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(lowerCamelCase__ )
def snake_case__ ( self , **lowerCamelCase__ ):
_lowerCamelCase = {}
_lowerCamelCase = {}
_lowerCamelCase = {}
# preprocess args
if "points_per_batch" in kwargs:
_lowerCamelCase = kwargs['''points_per_batch''']
if "points_per_crop" in kwargs:
_lowerCamelCase = kwargs['''points_per_crop''']
if "crops_n_layers" in kwargs:
_lowerCamelCase = kwargs['''crops_n_layers''']
if "crop_overlap_ratio" in kwargs:
_lowerCamelCase = kwargs['''crop_overlap_ratio''']
if "crop_n_points_downscale_factor" in kwargs:
_lowerCamelCase = kwargs['''crop_n_points_downscale_factor''']
# postprocess args
if "pred_iou_thresh" in kwargs:
_lowerCamelCase = kwargs['''pred_iou_thresh''']
if "stability_score_offset" in kwargs:
_lowerCamelCase = kwargs['''stability_score_offset''']
if "mask_threshold" in kwargs:
_lowerCamelCase = kwargs['''mask_threshold''']
if "stability_score_thresh" in kwargs:
_lowerCamelCase = kwargs['''stability_score_thresh''']
if "crops_nms_thresh" in kwargs:
_lowerCamelCase = kwargs['''crops_nms_thresh''']
if "output_rle_mask" in kwargs:
_lowerCamelCase = kwargs['''output_rle_mask''']
if "output_bboxes_mask" in kwargs:
_lowerCamelCase = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self , lowerCamelCase__ , *lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ):
return super().__call__(lowerCamelCase__ , *lowerCamelCase__ , num_workers=lowerCamelCase__ , batch_size=lowerCamelCase__ , **lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=6_4 , lowerCamelCase__ = 0 , lowerCamelCase__ = 5_1_2 / 1_5_0_0 , lowerCamelCase__ = 3_2 , lowerCamelCase__ = 1 , ):
_lowerCamelCase = load_image(lowerCamelCase__ )
_lowerCamelCase = self.image_processor.size['''longest_edge''']
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.image_processor.generate_crop_boxes(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = self.image_processor(images=lowerCamelCase__ , return_tensors='''pt''' )
with self.device_placement():
if self.framework == "pt":
_lowerCamelCase = self.get_inference_context()
with inference_context():
_lowerCamelCase = self._ensure_tensor_on_device(lowerCamelCase__ , device=self.device )
_lowerCamelCase = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''' ) )
_lowerCamelCase = image_embeddings
_lowerCamelCase = grid_points.shape[1]
_lowerCamelCase = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
'''To return all points at once, set points_per_batch to None''' )
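        # Stream the grid points in chunks of points_per_batch so that dense
        # point grids never have to be pushed through the model in one pass.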
for i in range(0 , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = grid_points[:, i : i + points_per_batch, :, :]
_lowerCamelCase = input_labels[:, i : i + points_per_batch]
_lowerCamelCase = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=0.8_8 , lowerCamelCase__=0.9_5 , lowerCamelCase__=0 , lowerCamelCase__=1 , ):
_lowerCamelCase = model_inputs.pop('''input_boxes''' )
_lowerCamelCase = model_inputs.pop('''is_last''' )
_lowerCamelCase = model_inputs.pop('''original_sizes''' ).tolist()
_lowerCamelCase = model_inputs.pop('''reshaped_input_sizes''' ).tolist()
_lowerCamelCase = self.model(**lowerCamelCase__ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
_lowerCamelCase = model_outputs['''pred_masks''']
_lowerCamelCase = self.image_processor.post_process_masks(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , binarize=lowerCamelCase__ )
_lowerCamelCase = model_outputs['''iou_scores''']
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=0.7 , ):
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
for model_output in model_outputs:
all_scores.append(model_output.pop('''iou_scores''' ) )
all_masks.extend(model_output.pop('''masks''' ) )
all_boxes.append(model_output.pop('''boxes''' ) )
_lowerCamelCase = torch.cat(lowerCamelCase__ )
_lowerCamelCase = torch.cat(lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.image_processor.post_process_for_mask_generation(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = defaultdict(lowerCamelCase__ )
for output in model_outputs:
for k, v in output.items():
                extra[k].append(v )
_lowerCamelCase = {}
if output_rle_mask:
_lowerCamelCase = rle_mask
if output_bboxes_mask:
_lowerCamelCase = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 661 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DeiTImageProcessor instead.''' , FutureWarning , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 661 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Dict = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_table_transformer'''] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 661 |
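_LazyModule defers the heavy submodule imports until an exported name is first touched. A pared-down illustration of the mechanism, not the library's actual implementation (which additionally handles TYPE_CHECKING, __spec__, and module reprs):

import importlib
from types import ModuleType

class LazyModule(ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {module: [names]} into {name: module} for O(1) lookups
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ fires only once per name
        return value

lazy = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.sqrt(2.0))  # "math" is imported only at this point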
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 5_1_2,
'''distilbert-base-uncased-distilled-squad''': 5_1_2,
'''distilbert-base-cased''': 5_1_2,
'''distilbert-base-cased-distilled-squad''': 5_1_2,
'''distilbert-base-german-cased''': 5_1_2,
'''distilbert-base-multilingual-cased''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : List[Any] = VOCAB_FILES_NAMES
lowercase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : str = PRETRAINED_INIT_CONFIGURATION
lowercase__ : int = ['input_ids', 'attention_mask']
lowercase__ : Tuple = DistilBertTokenizer
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__="[UNK]" , lowerCamelCase__="[SEP]" , lowerCamelCase__="[PAD]" , lowerCamelCase__="[CLS]" , lowerCamelCase__="[MASK]" , lowerCamelCase__=True , lowerCamelCase__=None , **lowerCamelCase__ , ):
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , )
_lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCamelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCamelCase__ ) != tokenize_chinese_chars
):
_lowerCamelCase = getattr(normalizers , normalizer_state.pop('''type''' ) )
_lowerCamelCase = do_lower_case
_lowerCamelCase = strip_accents
_lowerCamelCase = tokenize_chinese_chars
_lowerCamelCase = normalizer_class(**lowerCamelCase__ )
_lowerCamelCase = do_lower_case
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=None ):
_lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
| 661 | 1 |
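The two helper methods implement BERT-style sequence packing: [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair, with token type ids 0/1 marking the segments. A standalone sketch using BERT's conventional ids 101/102 purely for illustration:

def build_inputs(cls_id: int, sep_id: int, ids_a: list, ids_b: list | None = None) -> list:
    out = [cls_id] + ids_a + [sep_id]
    if ids_b:
        out += ids_b + [sep_id]
    return out

def token_type_ids(cls_id: int, sep_id: int, ids_a: list, ids_b: list | None = None) -> list:
    first = (len(ids_a) + 2) * [0]  # CLS + A + SEP
    if ids_b is None:
        return first
    return first + (len(ids_b) + 1) * [1]  # B + SEP

print(build_inputs(101, 102, [7, 8], [9]))    # [101, 7, 8, 102, 9, 102]
print(token_type_ids(101, 102, [7, 8], [9]))  # [0, 0, 0, 0, 1, 1]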
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Tuple = False
def lowerCAmelCase_( lowercase_ : Namespace ) -> Union[str, Any]:
return TrainCommand(lowercase_ )
class lowerCamelCase_( A__ ):
'''simple docstring'''
@staticmethod
def snake_case__ ( parser ):
train_parser = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' , type=lowerCamelCase__ , required=lowerCamelCase__ , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=lowerCamelCase__ , default=0 , help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' , type=lowerCamelCase__ , default=1 , help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' , type=lowerCamelCase__ , default=2 , help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' , type=lowerCamelCase__ , default='''''' , help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' , type=lowerCamelCase__ , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=lowerCamelCase__ , default='''./''' , help='''path to saved the trained model.''' )
train_parser.add_argument(
'''--task''' , type=lowerCamelCase__ , default='''text_classification''' , help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' , type=lowerCamelCase__ , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' , type=lowerCamelCase__ , default=3_2 , help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' , type=lowerCamelCase__ , default=6_4 , help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' , type=lowerCamelCase__ , default=3e-5 , help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' , type=lowerCamelCase__ , default=1e-08 , help='''Epsilon for Adam optimizer.''' )
train_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = logging.get_logger('''transformers-cli/training''' )
_lowerCamelCase = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=lowerCamelCase__ )
_lowerCamelCase = args.output
_lowerCamelCase = args.column_label
_lowerCamelCase = args.column_text
_lowerCamelCase = args.column_id
self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
_lowerCamelCase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"""Loading dataset from {args.train_data}""" )
_lowerCamelCase = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCamelCase = None
if args.validation_data:
self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
_lowerCamelCase = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCamelCase = args.validation_split
_lowerCamelCase = args.train_batch_size
_lowerCamelCase = args.valid_batch_size
_lowerCamelCase = args.learning_rate
_lowerCamelCase = args.adam_epsilon
def snake_case__ ( self ):
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def snake_case__ ( self ):
raise NotImplementedError
def snake_case__ ( self ):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 661 |
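The command plugs into the CLI through set_defaults(func=...): each subparser stores its own factory, and the top-level entry point just calls args.func(args). A self-contained sketch of that dispatch pattern:

from argparse import ArgumentParser

def run_train(args):
    print(f"would train {args.model} on {args.train_data}")

parser = ArgumentParser(prog="cli")
subparsers = parser.add_subparsers()
train = subparsers.add_parser("train", help="Train a model on a task.")
train.add_argument("--train_data", type=str, required=True)
train.add_argument("--model", type=str, default="bert-base-uncased")
train.set_defaults(func=run_train)  # the parsed namespace carries its handler

args = parser.parse_args(["train", "--train_data", "data.csv"])
args.func(args)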
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 2_9_9_7_9_2_4_5_8
# Symbols
ct , x , y , z = symbols('''ct x y z''')
def beta( velocity : float ) -> float:
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed is a sizeable fraction of c, so anything below 1 m/s is rejected
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def gamma( velocity : float ) -> float:
return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix( velocity : float ) -> np.ndarray:
return np.array(
[
[gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
[-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def transform( velocity : float , event : np.ndarray | None = None ) -> np.ndarray:
# Ensure event is not empty
if event is None:
event = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
four_vector = transform(2_9_9_7_9_2_4_5)
print('''Example of four vector: ''')
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
sub_dict = {ct: c, x: 1, y: 1, z: 1}
numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 661 | 1 |
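A quick numeric companion to the symbolic code above: for v = 0.5c the boost matrix reproduces the textbook time-dilation factor gamma ≈ 1.1547, computed from the same formulas:

from math import sqrt
import numpy as np

c = 299_792_458.0  # speed of light, m/s
v = 0.5 * c
beta = v / c
gamma = 1.0 / sqrt(1.0 - beta**2)  # ~1.1547 for beta = 0.5

boost = np.array([
    [gamma, -gamma * beta, 0.0, 0.0],
    [-gamma * beta, gamma, 0.0, 0.0],
    [0.0, 0.0, 1.0, 0.0],
    [0.0, 0.0, 0.0, 1.0],
])
event = np.array([c * 1.0, 0.0, 0.0, 0.0])  # (ct, x, y, z): one second at the origin
print(boost @ event)  # ct' = gamma * ct and x' = -gamma * beta * ct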
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
args , unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
cluster = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
cluster = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
example_dir = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 661 |
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6_0_2_1e-1_9 # units = C
def carrier_concentration( conductivity : float , electron_conc : float , mobility : float , ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 | 1 |
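The function encodes sigma = n · e · mu and solves for whichever quantity is passed as 0. A worked check with rough copper-like numbers (the inputs are approximate, for illustration only):

ELECTRON_CHARGE = 1.6021e-19  # C
n = 8.5e28    # electron concentration of copper, 1/m^3 (approximate)
mu = 4.3e-3   # electron mobility, m^2/(V*s) (approximate)
sigma = n * ELECTRON_CHARGE * mu
print(f"{sigma:.3e} S/m")             # ~5.9e7 S/m, near copper's measured value
print(sigma / (n * ELECTRON_CHARGE))  # inverting for mobility recovers ~4.3e-3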
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution( target : int = 2_00_00_00 ) -> int:
triangle_numbers = [0]
idx : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
best_product = 0
# the area corresponding to the grid that gives the product closest to target
area = 0
# an estimate of b, using the quadratic formula
b_estimate : float
# the largest integer less than b_estimate
b_floor : int
# the smallest integer greater than b_estimate
b_ceil : int
# the triangle number corresponding to b_floor
triangle_b_first_guess : int
# the triangle number corresponding to b_ceil
triangle_b_second_guess : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
b_floor = floor(b_estimate )
b_ceil = ceil(b_estimate )
triangle_b_first_guess = triangle_numbers[b_floor]
triangle_b_second_guess = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
best_product = triangle_b_first_guess * triangle_a
area = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
best_product = triangle_b_second_guess * triangle_a
area = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F"""{solution() = }""")
| 661 |
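The quadratic estimate above makes the search fast; it can be cross-checked against plain brute force, since an a x b grid contains T(a) * T(b) rectangles with T(k) = k(k+1)/2. This sketch confirms the published Project Euler 85 answer (the 36 x 77 grid, area 2772, product 1,999,998):

def brute_force(target: int = 2_000_000, limit: int = 200) -> int:
    tri = lambda k: k * (k + 1) // 2
    best_diff, best_area = float("inf"), 0
    for a in range(1, limit):
        for b in range(1, limit):
            diff = abs(target - tri(a) * tri(b))
            if diff < best_diff:
                best_diff, best_area = diff, a * b
    return best_area

print(brute_force())  # 2772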
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6},
}
}
_lowerCamelCase = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_2_8,
'''task_specific_params.summarization.min_length''': 1_2,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_4_2,
'''task_specific_params.summarization_cnn.min_length''': 5_6,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 6_2,
'''task_specific_params.summarization_xsum.min_length''': 1_1,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(lowerCamelCase__ ) , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , x.transpose() ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , transpose(lowerCamelCase__ ).numpy() ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
_lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , transpose(lowerCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , transpose(lowerCamelCase__ ).numpy() ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , transpose(lowerCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , np.asarray(transpose(lowerCamelCase__ ) ) ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
_lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , np.asarray(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) ) ) )
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , np.reshape(lowerCamelCase__ , (4, 3) ) ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (1_2, 5) ) , np.reshape(lowerCamelCase__ , (1_2, 5) ) ) )
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , reshape(lowerCamelCase__ , (4, 3) ).numpy() ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
_lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (1_2, 5) ) , reshape(lowerCamelCase__ , (1_2, 5) ).numpy() ) )
@require_tf
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , reshape(lowerCamelCase__ , (4, 3) ).numpy() ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (1_2, 5) ) , reshape(lowerCamelCase__ , (1_2, 5) ).numpy() ) )
@require_flax
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , np.asarray(reshape(lowerCamelCase__ , (4, 3) ) ) ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
_lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (1_2, 5) ) , np.asarray(reshape(lowerCamelCase__ , (1_2, 5) ) ) ) )
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , np.squeeze(lowerCamelCase__ ) ) )
_lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , np.squeeze(lowerCamelCase__ , axis=2 ) ) )
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(1 , 3 , 4 )
_lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , squeeze(lowerCamelCase__ ).numpy() ) )
_lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
_lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , squeeze(lowerCamelCase__ , axis=2 ).numpy() ) )
@require_tf
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(1 , 3 , 4 )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , squeeze(lowerCamelCase__ ).numpy() ) )
_lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , squeeze(lowerCamelCase__ , axis=2 ).numpy() ) )
@require_flax
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(1 , 3 , 4 )
_lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , np.asarray(squeeze(lowerCamelCase__ ) ) ) )
_lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
_lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , np.asarray(squeeze(lowerCamelCase__ , axis=2 ) ) ) )
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , np.expand_dims(lowerCamelCase__ , axis=1 ) ) )
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , expand_dims(lowerCamelCase__ , axis=1 ).numpy() ) )
@require_tf
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , expand_dims(lowerCamelCase__ , axis=1 ).numpy() ) )
@require_flax
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , np.asarray(expand_dims(lowerCamelCase__ , axis=1 ) ) ) )
| 661 | 1 |
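Each helper under test dispatches on the array's type so a single call works across NumPy, PyTorch, TensorFlow and JAX. A minimal two-framework version of that dispatch (the library covers all four frameworks and more edge cases):

import numpy as np

def transpose(array, axes=None):
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    if array.__class__.__module__.startswith("torch"):
        # torch's permute needs explicit axes; None means reverse all dims
        axes = axes if axes is not None else tuple(reversed(range(array.dim())))
        return array.permute(*axes)
    raise TypeError(f"unsupported array type: {type(array)}")

x = np.random.randn(3, 4, 5)
assert transpose(x, axes=(1, 2, 0)).shape == (4, 5, 3)
assert transpose(x).shape == (5, 4, 3)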
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 5_1_2,
'''distilbert-base-uncased-distilled-squad''': 5_1_2,
'''distilbert-base-cased''': 5_1_2,
'''distilbert-base-cased-distilled-squad''': 5_1_2,
'''distilbert-base-german-cased''': 5_1_2,
'''distilbert-base-multilingual-cased''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : List[Any] = VOCAB_FILES_NAMES
lowercase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : str = PRETRAINED_INIT_CONFIGURATION
lowercase__ : int = ['input_ids', 'attention_mask']
lowercase__ : Tuple = DistilBertTokenizer
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__="[UNK]" , lowerCamelCase__="[SEP]" , lowerCamelCase__="[PAD]" , lowerCamelCase__="[CLS]" , lowerCamelCase__="[MASK]" , lowerCamelCase__=True , lowerCamelCase__=None , **lowerCamelCase__ , ):
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , )
_lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCamelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCamelCase__ ) != tokenize_chinese_chars
):
_lowerCamelCase = getattr(normalizers , normalizer_state.pop('''type''' ) )
_lowerCamelCase = do_lower_case
_lowerCamelCase = strip_accents
_lowerCamelCase = tokenize_chinese_chars
_lowerCamelCase = normalizer_class(**lowerCamelCase__ )
_lowerCamelCase = do_lower_case
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=None ):
_lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
| 661 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot-3B''': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
bs = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
cs = bs[:]
n = 0
for b in range(2**8 ):
if b not in bs:
bs.append(b )
cs.append(2**8 + n )
n += 1
cs = [chr(n ) for n in cs]
return dict(zip(bs , cs ) )
def get_pairs( word : str ) -> set:
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
return pairs
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase__ : int = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Tuple = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="replace" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=False , **lowerCamelCase__ , ):
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
with open(lowerCamelCase__ , encoding='''utf-8''' ) as vocab_handle:
_lowerCamelCase = json.load(lowerCamelCase__ )
_lowerCamelCase = {v: k for k, v in self.encoder.items()}
_lowerCamelCase = errors # how to handle errors in decoding
_lowerCamelCase = bytes_to_unicode()
_lowerCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase__ , encoding='''utf-8''' ) as merges_handle:
_lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1]
_lowerCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
_lowerCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_lowerCamelCase = {}
_lowerCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCamelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def snake_case__ ( self ):
return len(self.encoder )
def snake_case__ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case__ ( self , lowerCamelCase__ ):
if token in self.cache:
return self.cache[token]
_lowerCamelCase = tuple(lowerCamelCase__ )
_lowerCamelCase = get_pairs(lowerCamelCase__ )
if not pairs:
return token
while True:
_lowerCamelCase = min(lowerCamelCase__ , key=lambda lowerCamelCase__ : self.bpe_ranks.get(lowerCamelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCamelCase , _lowerCamelCase = bigram
_lowerCamelCase = []
_lowerCamelCase = 0
while i < len(lowerCamelCase__ ):
try:
_lowerCamelCase = word.index(lowerCamelCase__ , lowerCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCamelCase = j
if word[i] == first and i < len(lowerCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCamelCase = tuple(lowerCamelCase__ )
_lowerCamelCase = new_word
if len(lowerCamelCase__ ) == 1:
break
else:
_lowerCamelCase = get_pairs(lowerCamelCase__ )
_lowerCamelCase = ''' '''.join(lowerCamelCase__ )
_lowerCamelCase = word
return word
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = []
for token in re.findall(self.pat , lowerCamelCase__ ):
_lowerCamelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase__ ).split(''' ''' ) )
return bpe_tokens
def snake_case__ ( self , lowerCamelCase__ ):
return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )
def snake_case__ ( self , lowerCamelCase__ ):
return self.decoder.get(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = ''''''.join(lowerCamelCase__ )
_lowerCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) + '''\n''' )
_lowerCamelCase = 0
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
_lowerCamelCase = token_index
writer.write(''' '''.join(lowerCamelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=False , **lowerCamelCase__ ):
_lowerCamelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase__ ) > 0 and not text[0].isspace()):
_lowerCamelCase = ''' ''' + text
return (text, kwargs)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
return token_ids_a + [self.eos_token_id]
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(lowerCamelCase__ )
_lowerCamelCase = ''' '''.join(lowerCamelCase__ )
_lowerCamelCase = self.encode(lowerCamelCase__ )
if len(lowerCamelCase__ ) > self.model_max_length:
_lowerCamelCase = input_ids[-self.model_max_length :]
logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 661 | 1 |
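The heart of the tokenizer is the greedy BPE loop: repeatedly find the adjacent symbol pair with the lowest merge rank and fuse it until no ranked pair remains. A compact standalone version with a toy merge table:

def bpe(token: str, bpe_ranks: dict) -> str:
    word = list(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if bigram not in bpe_ranks:
            break  # no mergeable pair left
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    return " ".join(word)

ranks = {("l", "o"): 0, ("lo", "w"): 1}  # toy table: merge "lo" first, then "low"
print(bpe("low", ranks))  # "low" -- fully merged after two steps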
"""simple docstring"""
from statistics import mean
import numpy as np
def calculate_turn_around_time( process_name : list , arrival_time : list , burst_time : list , no_of_process : int ) -> list:
current_time = 0
# Number of processes finished
finished_process_count = 0
# Tracks each process's completion status:
# 1 means the process has finished, 0 means it has not yet run.
finished_process = [0] * no_of_process
# List to include calculation results
turn_around_time = [0] * no_of_process
# Sort by arrival time.
burst_time = [burst_time[i] for i in np.argsort(arrival_time )]
process_name = [process_name[i] for i in np.argsort(arrival_time )]
arrival_time.sort()
while no_of_process > finished_process_count:
i = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
current_time = arrival_time[i]
response_ratio = 0
# Index showing the location of the process being performed
loc = 0
# Saves the current response ratio.
temp = 0
for i in range(0 , no_of_process ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
if response_ratio < temp:
response_ratio = temp
loc = i
# Calculate the turn around time
turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
finished_process[loc] = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def calculate_waiting_time( process_name : list , turn_around_time : list , burst_time : list , no_of_process : int ) -> list:
waiting_time = [0] * no_of_process
for i in range(0 , no_of_process ):
waiting_time[i] = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
no_of_process = 5
process_name = ['''A''', '''B''', '''C''', '''D''', '''E''']
arrival_time = [1, 2, 3, 4, 5]
burst_time = [1, 2, 3, 4, 5]
turn_around_time = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
waiting_time = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""")
| 661 |
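HRRN picks, among the ready processes, the one with the highest response ratio (waiting + burst) / burst, so long waits steadily raise a process's priority and prevent starvation. A tiny worked example of the ratio the inner loop computes:

current_time, arrival, burst = 6, 0, 3
waiting = current_time - arrival
ratio = (burst + waiting) / burst
print(ratio)  # 3.0 -- the same process evaluated at t=3 would only score 2.0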
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Dict = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Union[str, Any] = XLMRobertaTokenizer
lowercase__ : Optional[int] = XLMRobertaTokenizerFast
lowercase__ : List[str] = True
lowercase__ : Union[str, Any] = True
def snake_case__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase = XLMRobertaTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self ):
_lowerCamelCase = '''<pad>'''
_lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCamelCase__ ) , 1_0_0_2 )
def snake_case__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 )
def snake_case__ ( self ):
_lowerCamelCase = XLMRobertaTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
_lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_lowerCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def snake_case__ ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = tokenizer_r.save_pretrained(lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_lowerCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
_lowerCamelCase = tokenizer_r.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=True
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
_lowerCamelCase = tokenizer_r.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=False
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCamelCase = tokenizer_r.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
@cached_property
def snake_case__ ( self ):
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def snake_case__ ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase__ , f.name )
_lowerCamelCase = XLMRobertaTokenizer(f.name , keep_accents=lowerCamelCase__ )
_lowerCamelCase = pickle.dumps(lowerCamelCase__ )
pickle.loads(lowerCamelCase__ )
def snake_case__ ( self ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_rust_tokenizer()
_lowerCamelCase = '''I was born in 92000, and this is falsé.'''
_lowerCamelCase = tokenizer.tokenize(lowerCamelCase__ )
_lowerCamelCase = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
_lowerCamelCase = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = self.get_rust_tokenizer()
_lowerCamelCase = tokenizer.encode(lowerCamelCase__ )
_lowerCamelCase = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
_lowerCamelCase = '''Hello World!'''
_lowerCamelCase = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def snake_case__ ( self ):
_lowerCamelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
_lowerCamelCase = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = {'''input_ids''': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 661 | 1 |
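The fairseq_offset used in the assertions exists because fairseq checkpoints reserve the lowest ids for <s>, <pad>, </s>, <unk>, so every raw SentencePiece id is shifted by a constant (1 for XLM-R). A sketch of that mapping, not the tokenizer's actual code path:

fairseq_offset = 1  # XLM-R's shift between SentencePiece ids and model ids

def sp_id_to_model_id(sp_id: int) -> int:
    return sp_id + fairseq_offset

print(sp_id_to_model_id(285))  # 286, mirroring `value + tokenizer.fairseq_offset` above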
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_mvp_fast'''] = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mvp'''] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
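# Note (added for clarity): with this lazy-import pattern, importing the package is
# cheap; the heavy torch/tokenizers modules named in `_import_structure` are only
# imported the first time an attribute such as `MvpForConditionalGeneration` is accessed.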
| 661 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    # Project Euler 97: last n digits of the non-Mersenne prime 28433 * 2**7830457 + 1.
    if not isinstance(n, int) or n < 0:
        raise ValueError('''Invalid input''' )
    modulus = 10**n
    number = 2_84_33 * (pow(2 , 7_83_04_57 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(1_0) = }""")
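    # Sanity check (ours, not part of the original file): pow(2, 7_83_04_57, modulus)
    # does modular exponentiation, so the 2.3-million-digit power is never materialised.
    # The well-known Project Euler 97 answer drops out directly:
    #   solution(10) -> '8739992577'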
| 661 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = tf.convert_to_tensor(
[
[
8.2_2_2_0_9_9_1, # 3rd highest value; idx. 0
-0.5_6_2_0_0_4_4,
5.2_3_2_2_9_7_5_2,
4.0_3_8_6_3_9_3,
-6.8_7_9_8_3_7_8,
-0.5_4_7_8_5_8_0_2,
-3.2_0_1_2_1_5_3,
2.9_2_7_7_7_1_7_6,
1.8_8_1_7_1_9_5_3,
7.3_5_3_4_1_2_7_6, # 5th highest value; idx. 9
8.4_3_2_0_7_8_3_3, # 2nd highest value; idx. 10
-9.8_5_7_1_1_8_3_6,
-5.9_6_2_0_9_2_3_6,
-1.1_3_0_3_9_1_6_1,
-7.1_1_1_5_2_9_4,
-0.8_3_6_9_6_3_3,
-5.3_1_8_6_4_0_8,
7.0_6_4_2_7_4_0_7,
0.8_1_3_6_9_3_4_4,
-0.8_2_0_2_3_8_1_7,
-5.9_1_7_9_7_9_6,
0.5_8_8_1_3_4_4_3,
-6.9_9_7_7_8_4_3_8,
4.7_1_5_5_1_1_8_9,
-0.1_8_7_7_1_6_3_7,
7.4_4_0_2_0_7_5_9, # 4th highest value; idx. 25
9.3_8_4_5_0_9_8_7, # 1st highest value; idx. 26
2.1_2_6_6_2_9_4_1,
-9.3_2_5_6_2_0_3_8,
2.3_5_6_5_2_5_2_2,
], # cummulative prob of 5 highest values <= 0.6
[
0.5_8_4_2_5_5_1_8,
4.5_3_1_3_9_2_3_8,
-5.5_7_5_1_0_4_6_4,
-6.2_8_0_3_0_6_9_9,
-7.1_9_5_2_9_5_0_3,
-4.0_2_1_2_2_5_5_1,
1.3_9_3_3_7_0_3_7,
-6.0_6_7_0_7_0_5_7,
1.5_9_4_8_0_5_1_7,
-9.6_4_3_1_1_9,
0.0_3_9_0_7_7_9_9,
0.6_7_2_3_1_7_6_2,
-8.8_8_2_0_6_7_2_6,
6.2_7_1_1_5_9_2_2, # 4th highest value; idx. 13
2.2_8_5_2_0_7_2_3,
4.8_2_7_6_7_5_0_6,
4.3_0_4_2_1_3_6_8,
8.8_2_7_5_3_1_3, # 2nd highest value; idx. 17
5.4_4_0_2_9_9_5_8, # 5th highest value; idx. 18
-4.4_7_3_5_7_9_4,
7.3_8_5_7_9_5_3_6, # 3rd highest value; idx. 20
-2.9_1_0_5_1_6_6_3,
2.6_1_9_4_6_0_7_7,
-2.5_6_7_4_7_6_2,
-9.4_8_9_5_9_3_0_2,
-4.0_2_9_2_2_6_4_5,
-1.3_5_4_1_6_9_1_8,
9.6_7_7_0_2_3_2_3, # 1st highest value; idx. 27
-5.8_9_4_7_8_5_5_3,
1.8_5_3_7_0_4_6_7,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
_lowerCamelCase = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
_lowerCamelCase = tf.convert_to_tensor(
[8.2_2_2_0_9_9, 7.3_5_3_4_1_2_6, 8.4_3_2_0_7_8, 7.4_4_0_2_0_7_5, 9.3_8_4_5_1, 6.2_7_1_1_5_9, 8.8_2_7_5_3_1, 5.4_4_0_2_9_9_5, 7.3_8_5_7_9_5_6, 9.6_7_7_0_2_3] , dtype=tf.floataa , ) # expected non filtered values as noted above
_lowerCamelCase = tf_top_k_top_p_filtering(lowerCamelCase__ , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
_lowerCamelCase = output[output != -float('''inf''' )]
_lowerCamelCase = tf.cast(
tf.where(tf.not_equal(lowerCamelCase__ , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(lowerCamelCase__ , lowerCamelCase__ , rtol=1e-12 )
tf.debugging.assert_equal(lowerCamelCase__ , lowerCamelCase__ )
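# Illustrative sketch (ours; the function name and keyword names below are our
# assumptions, not the library's API): what tf_top_k_top_p_filtering does,
# re-implemented for a single row of logits in plain NumPy.
def _toy_top_k_top_p(logits, top_k, top_p, min_tokens_to_keep=1, filter_value=-float("inf")):
    logits = np.asarray(logits, dtype=np.float64).copy()
    if top_k > 0:
        # top-k: mask everything strictly below the k-th highest logit
        kth_best = np.sort(logits)[-max(top_k, min_tokens_to_keep)]
        logits[logits < kth_best] = filter_value
    if top_p < 1.0:
        # top-p (nucleus): keep the smallest prefix of tokens, in descending
        # probability order, whose cumulative mass reaches top_p
        order = np.argsort(logits)[::-1]
        probs = np.exp(logits[order] - logits[order].max())
        probs /= probs.sum()
        keep = max(int(np.searchsorted(np.cumsum(probs), top_p)) + 1, min_tokens_to_keep)
        logits[order[keep:]] = filter_value
    return logits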
@require_tf
class lowerCamelCase_( unittest.TestCase, A__ ):
'''simple docstring'''
if is_tf_available():
lowercase__ : Optional[Any] = {
'AutoModelForCausalLM': TFAutoModelForCausalLM,
'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
def snake_case__ ( self ):
# TF-only test: tf.saved_model export
_lowerCamelCase = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase = 2
_lowerCamelCase = 2
class lowerCamelCase_( tf.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
super(lowerCamelCase__ , self ).__init__()
_lowerCamelCase = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=lowerCamelCase__ , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.model.generate(
input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ , max_new_tokens=lowerCamelCase__ , return_dict_in_generate=lowerCamelCase__ , )
return {"sequences": outputs["sequences"]}
_lowerCamelCase = [[2, 0], [1_0_2, 1_0_3]]
_lowerCamelCase = [[1, 0], [1, 1]]
_lowerCamelCase = DummyModel(model=lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(lowerCamelCase__ , lowerCamelCase__ , signatures={'''serving_default''': dummy_model.serving} )
_lowerCamelCase = tf.saved_model.load(lowerCamelCase__ ).signatures['''serving_default''']
for batch_size in range(1 , len(lowerCamelCase__ ) + 1 ):
_lowerCamelCase = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
_lowerCamelCase = serving_func(**lowerCamelCase__ )['''sequences''']
_lowerCamelCase = test_model.generate(**lowerCamelCase__ , max_new_tokens=lowerCamelCase__ )
tf.debugging.assert_equal(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
# TF-only test: tf.saved_model export
_lowerCamelCase = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase = 1
_lowerCamelCase = 2
class lowerCamelCase_( tf.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
super(lowerCamelCase__ , self ).__init__()
_lowerCamelCase = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=lowerCamelCase__ , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.model.generate(
input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ , max_new_tokens=lowerCamelCase__ , return_dict_in_generate=lowerCamelCase__ , )
return {"sequences": outputs["sequences"]}
_lowerCamelCase = [[2], [1_0_2, 1_0_3]]
_lowerCamelCase = [[1], [1, 1]]
_lowerCamelCase = DummyModel(model=lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(lowerCamelCase__ , lowerCamelCase__ , signatures={'''serving_default''': dummy_model.serving} )
_lowerCamelCase = tf.saved_model.load(lowerCamelCase__ ).signatures['''serving_default''']
for input_row in range(len(lowerCamelCase__ ) ):
_lowerCamelCase = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
_lowerCamelCase = serving_func(**lowerCamelCase__ )['''sequences''']
_lowerCamelCase = test_model.generate(**lowerCamelCase__ , max_new_tokens=lowerCamelCase__ )
tf.debugging.assert_equal(lowerCamelCase__ , lowerCamelCase__ )
@slow
@require_tensorflow_text
def snake_case__ ( self ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=lowerCamelCase__ )
class lowerCamelCase_( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self ):
super().__init__()
_lowerCamelCase = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(lowerCamelCase__ , '''spiece.model''' ) , '''rb''' ).read() )
_lowerCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def snake_case__ ( self , lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ):
_lowerCamelCase = self.tokenizer.tokenize(lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = text.pad_model_inputs(
lowerCamelCase__ , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
_lowerCamelCase = self.model.generate(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
return self.tokenizer.detokenize(lowerCamelCase__ )
_lowerCamelCase = CompleteSentenceTransformer()
_lowerCamelCase = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
_lowerCamelCase = complete_model(lowerCamelCase__ )
_lowerCamelCase = tf.keras.Model(lowerCamelCase__ , lowerCamelCase__ )
keras_model.save(lowerCamelCase__ )
def snake_case__ ( self ):
# Has PT equivalent: this test relies on random sampling
_lowerCamelCase = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 1_0,
'''temperature''': 0.7,
}
_lowerCamelCase = 1_4
_lowerCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase = '''Hello, my dog is cute and'''
_lowerCamelCase = tokenizer(lowerCamelCase__ , return_tensors='''tf''' )
_lowerCamelCase = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
_lowerCamelCase = model.generate(**lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
_lowerCamelCase = [6_3_8, 1_9_8]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
_lowerCamelCase = model.generate(**lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def snake_case__ ( self ):
# Has PT equivalent: ample use of framework-specific code
_lowerCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
_lowerCamelCase = '''Hugging Face is a technology company based in New York and Paris.'''
_lowerCamelCase = bart_tokenizer(lowerCamelCase__ , return_tensors='''tf''' ).input_ids
_lowerCamelCase = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
_lowerCamelCase = bart_model.generate(lowerCamelCase__ ).numpy()
class lowerCamelCase_( A__ ):
'''simple docstring'''
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
return super().call(lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
_lowerCamelCase = bart_model.generate(lowerCamelCase__ , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(lowerCamelCase__ , lowerCamelCase__ ) )
class lowerCamelCase_( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def snake_case__ ( self , lowerCamelCase__ , **lowerCamelCase__ ):
return super().call(lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = FakeEncoder(bart_model.config , bart_model.model.shared )
_lowerCamelCase = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
_lowerCamelCase = bart_model.generate(lowerCamelCase__ ).numpy()
with self.assertRaises(lowerCamelCase__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(lowerCamelCase__ , foo='''bar''' )
| 661 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_informer'''] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 661 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
__SCREAMING_SNAKE_CASE : List[str] = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''allenai/longformer-base-4096''': 4_0_9_6,
'''allenai/longformer-large-4096''': 4_0_9_6,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4_0_9_6,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4_0_9_6,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
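# For example (illustrative):
#   get_pairs(("l", "o", "w", "e", "r")) == {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}
# i.e. the set of adjacent symbol bigrams that the BPE loop below ranks and merges.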
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Any = VOCAB_FILES_NAMES
lowercase__ : Any = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="replace" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=False , **lowerCamelCase__ , ):
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
with open(lowerCamelCase__ , encoding='''utf-8''' ) as vocab_handle:
_lowerCamelCase = json.load(lowerCamelCase__ )
_lowerCamelCase = {v: k for k, v in self.encoder.items()}
_lowerCamelCase = errors # how to handle errors in decoding
_lowerCamelCase = bytes_to_unicode()
_lowerCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase__ , encoding='''utf-8''' ) as merges_handle:
_lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1]
_lowerCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
_lowerCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_lowerCamelCase = {}
_lowerCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCamelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def snake_case__ ( self ):
return len(self.encoder )
def snake_case__ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case__ ( self , lowerCamelCase__ ):
if token in self.cache:
return self.cache[token]
_lowerCamelCase = tuple(lowerCamelCase__ )
_lowerCamelCase = get_pairs(lowerCamelCase__ )
if not pairs:
return token
while True:
_lowerCamelCase = min(lowerCamelCase__ , key=lambda lowerCamelCase__ : self.bpe_ranks.get(lowerCamelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCamelCase , _lowerCamelCase = bigram
_lowerCamelCase = []
_lowerCamelCase = 0
while i < len(lowerCamelCase__ ):
try:
_lowerCamelCase = word.index(lowerCamelCase__ , lowerCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCamelCase = j
if word[i] == first and i < len(lowerCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCamelCase = tuple(lowerCamelCase__ )
_lowerCamelCase = new_word
if len(lowerCamelCase__ ) == 1:
break
else:
_lowerCamelCase = get_pairs(lowerCamelCase__ )
_lowerCamelCase = ''' '''.join(lowerCamelCase__ )
_lowerCamelCase = word
return word
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = []
for token in re.findall(self.pat , lowerCamelCase__ ):
_lowerCamelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase__ ).split(''' ''' ) )
return bpe_tokens
def snake_case__ ( self , lowerCamelCase__ ):
return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )
def snake_case__ ( self , lowerCamelCase__ ):
return self.decoder.get(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = ''''''.join(lowerCamelCase__ )
_lowerCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) + '''\n''' )
_lowerCamelCase = 0
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
_lowerCamelCase = token_index
writer.write(''' '''.join(lowerCamelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
_lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=False , **lowerCamelCase__ ):
_lowerCamelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase__ ) > 0 and not text[0].isspace()):
_lowerCamelCase = ''' ''' + text
return (text, kwargs)
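    # Note (added for clarity): as in RoBERTa, build_inputs_with_special_tokens above
    # formats a single sequence as `<s> A </s>` and a pair as `<s> A </s></s> B </s>`.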
| 661 |
"""simple docstring"""
def jaro_winkler(str_a: str , str_b: str ) -> float:
    """Jaro-Winkler similarity of two strings: 1.0 means identical, 0.0 disjoint."""
    def get_matched_characters(_stra: str , _strb: str ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = F"""{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}"""
        return "".join(matched )
    # matching characters
    matching_a = get_matched_characters(str_a , str_b )
    matching_b = get_matched_characters(str_b , str_a )
    match_count = len(matching_a )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_a )
                + match_count / len(str_b )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(str_a[:4] , str_b[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
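    # Hand-computed reference values for the implementation above (treat as a
    # sanity check rather than ground truth):
    #   jaro_winkler("martha", "marhta") is approximately 0.9611
    #   jaro_winkler("hello", "world")   is approximately 0.4667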
| 661 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=4 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case__ ( self ):
_lowerCamelCase = FlaxDistilBertModelTester(self )
@slow
def snake_case__ ( self ):
for model_class_name in self.all_model_classes:
_lowerCamelCase = model_class_name.from_pretrained('''distilbert-base-uncased''' )
_lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__ ( self ):
_lowerCamelCase = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
_lowerCamelCase = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0]
_lowerCamelCase = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , lowerCamelCase__ )
_lowerCamelCase = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase__ , atol=1e-4 ) )
| 661 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCamelCase_( A__, A__, A__ ):
'''simple docstring'''
lowercase__ : List[Any] = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = 5_0_2_5_7 , lowerCamelCase__ = 1_0_2_4 , lowerCamelCase__ = 7_6_8 , lowerCamelCase__ = 1_2 , lowerCamelCase__ = 1_2 , lowerCamelCase__ = None , lowerCamelCase__ = "gelu_new" , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 1e-5 , lowerCamelCase__ = 0.0_2 , lowerCamelCase__ = True , lowerCamelCase__ = True , lowerCamelCase__ = False , lowerCamelCase__ = False , ):
super().__init__()
_lowerCamelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
_lowerCamelCase = prefix_inner_dim
_lowerCamelCase = prefix_hidden_dim
_lowerCamelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_lowerCamelCase = (
nn.Linear(self.prefix_hidden_dim , lowerCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_lowerCamelCase = GPTaConfig(
vocab_size=lowerCamelCase__ , n_positions=lowerCamelCase__ , n_embd=lowerCamelCase__ , n_layer=lowerCamelCase__ , n_head=lowerCamelCase__ , n_inner=lowerCamelCase__ , activation_function=lowerCamelCase__ , resid_pdrop=lowerCamelCase__ , embd_pdrop=lowerCamelCase__ , attn_pdrop=lowerCamelCase__ , layer_norm_epsilon=lowerCamelCase__ , initializer_range=lowerCamelCase__ , scale_attn_weights=lowerCamelCase__ , use_cache=lowerCamelCase__ , scale_attn_by_inverse_layer_idx=lowerCamelCase__ , reorder_and_upcast_attn=lowerCamelCase__ , )
_lowerCamelCase = GPTaLMHeadModel(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , ):
_lowerCamelCase = self.transformer.transformer.wte(lowerCamelCase__ )
_lowerCamelCase = self.encode_prefix(lowerCamelCase__ )
_lowerCamelCase = self.decode_prefix(lowerCamelCase__ )
_lowerCamelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_lowerCamelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_lowerCamelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_lowerCamelCase = self.transformer(inputs_embeds=lowerCamelCase__ , labels=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
return torch.zeros(lowerCamelCase__ , self.prefix_length , dtype=torch.intaa , device=lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
return self.encode_prefix(lowerCamelCase__ )
@torch.no_grad()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = torch.split(lowerCamelCase__ , 1 , dim=0 )
_lowerCamelCase = []
_lowerCamelCase = []
for feature in features:
_lowerCamelCase = self.decode_prefix(feature.to(lowerCamelCase__ ) ) # back to the clip feature
# Only support beam search for now
_lowerCamelCase , _lowerCamelCase = self.generate_beam(
input_embeds=lowerCamelCase__ , device=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_lowerCamelCase = torch.stack(lowerCamelCase__ )
_lowerCamelCase = torch.stack(lowerCamelCase__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def snake_case__ ( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__ = 5 , lowerCamelCase__ = 6_7 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = None , ):
_lowerCamelCase = eos_token_id
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = torch.ones(lowerCamelCase__ , device=lowerCamelCase__ , dtype=torch.int )
_lowerCamelCase = torch.zeros(lowerCamelCase__ , device=lowerCamelCase__ , dtype=torch.bool )
if input_embeds is not None:
_lowerCamelCase = input_embeds
else:
_lowerCamelCase = self.transformer.transformer.wte(lowerCamelCase__ )
for i in range(lowerCamelCase__ ):
_lowerCamelCase = self.transformer(inputs_embeds=lowerCamelCase__ )
_lowerCamelCase = outputs.logits
_lowerCamelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_lowerCamelCase = logits.softmax(-1 ).log()
if scores is None:
_lowerCamelCase , _lowerCamelCase = logits.topk(lowerCamelCase__ , -1 )
_lowerCamelCase = generated.expand(lowerCamelCase__ , *generated.shape[1:] )
_lowerCamelCase , _lowerCamelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_lowerCamelCase = next_tokens
else:
_lowerCamelCase = tokens.expand(lowerCamelCase__ , *tokens.shape[1:] )
_lowerCamelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_lowerCamelCase = -float(np.inf )
_lowerCamelCase = 0
_lowerCamelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_lowerCamelCase = scores_sum / seq_lengths[:, None]
_lowerCamelCase , _lowerCamelCase = scores_sum_average.view(-1 ).topk(lowerCamelCase__ , -1 )
_lowerCamelCase = next_tokens // scores_sum.shape[1]
_lowerCamelCase = seq_lengths[next_tokens_source]
_lowerCamelCase = next_tokens % scores_sum.shape[1]
_lowerCamelCase = next_tokens.unsqueeze(1 )
_lowerCamelCase = tokens[next_tokens_source]
_lowerCamelCase = torch.cat((tokens, next_tokens) , dim=1 )
_lowerCamelCase = generated[next_tokens_source]
_lowerCamelCase = scores_sum_average * seq_lengths
_lowerCamelCase = is_stopped[next_tokens_source]
_lowerCamelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_lowerCamelCase = torch.cat((generated, next_token_embed) , dim=1 )
_lowerCamelCase = is_stopped + next_tokens.eq(lowerCamelCase__ ).squeeze()
if is_stopped.all():
break
_lowerCamelCase = scores / seq_lengths
_lowerCamelCase = scores.argsort(descending=lowerCamelCase__ )
# tokens tensors are already padded to max_seq_length
_lowerCamelCase = [tokens[i] for i in order]
_lowerCamelCase = torch.stack(lowerCamelCase__ , dim=0 )
_lowerCamelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
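# Illustrative sketch (ours, not part of the pipeline): the bookkeeping in
# generate_beam above reduced to its core. Each step extends every surviving
# hypothesis, re-ranks by length-normalised cumulative log-probability, and
# keeps the best `beam_size`. `log_probs_fn` is a hypothetical scoring callback.
def _toy_beam_step(hypotheses, log_probs_fn, beam_size):
    # hypotheses: list of (token_list, cumulative_logprob) pairs
    candidates = []
    for tokens, score in hypotheses:
        for token, logp in log_probs_fn(tokens):
            candidates.append((tokens + [token], score + logp))
    candidates.sort(key=lambda h: h[1] / len(h[0]), reverse=True)
    return candidates[:beam_size]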
| 661 | 1 |
"""simple docstring"""
def knapsack(values: list , weights: list , number_of_items: int , max_weight: int , index: int ) -> int:
    # 0/1 knapsack by plain recursion: best achievable value using items[index:]
    # with `max_weight` capacity remaining.
    if index == number_of_items:
        return 0
    # option 1: skip the current item
    ans_without_item = knapsack(values , weights , number_of_items , max_weight , index + 1 )
    # option 2: take the current item, if it still fits
    ans_with_item = 0
    if weights[index] <= max_weight:
        ans_with_item = values[index] + knapsack(
            values , weights , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans_without_item , ans_with_item )
if __name__ == "__main__":
import doctest
doctest.testmod()
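    # Example (illustrative): the classic instance with values (60, 100, 120),
    # weights (10, 20, 30) and capacity 50, where the optimum takes the last two items:
    #   knapsack([60, 100, 120], [10, 20, 30], 3, 50, 0) -> 220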
| 661 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__SCREAMING_SNAKE_CASE : Optional[int] = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__SCREAMING_SNAKE_CASE : List[str] = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def normalize_answer(text ):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text ):
        regex = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE )
        return re.sub(regex , ''' ''' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
def compute_exact(a_gold , a_pred ):
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em(predictions , references ):
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 1_00
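# For example (illustrative): normalisation strips case, punctuation and articles,
# and a prediction matches if it agrees with any one of its references:
#   compute_em(predictions=["The cat sat."], references=[["cat sat", "a dog ran"]]) -> 100.0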
def SARIngram(sgrams , cgrams , rgramslist , numref ):
_lowerCamelCase = [rgram for rgrams in rgramslist for rgram in rgrams]
_lowerCamelCase = Counter(lowercase_ )
_lowerCamelCase = Counter(lowercase_ )
_lowerCamelCase = Counter()
for sgram, scount in sgramcounter.items():
_lowerCamelCase = scount * numref
_lowerCamelCase = Counter(lowercase_ )
_lowerCamelCase = Counter()
for cgram, ccount in cgramcounter.items():
_lowerCamelCase = ccount * numref
# KEEP
_lowerCamelCase = sgramcounter_rep & cgramcounter_rep
_lowerCamelCase = keepgramcounter_rep & rgramcounter
_lowerCamelCase = sgramcounter_rep & rgramcounter
_lowerCamelCase = 0
_lowerCamelCase = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCamelCase = 1
_lowerCamelCase = 1
if len(lowercase_ ) > 0:
_lowerCamelCase = keeptmpscorea / len(lowercase_ )
if len(lowercase_ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_lowerCamelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_lowerCamelCase = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_lowerCamelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_lowerCamelCase = sgramcounter_rep - cgramcounter_rep
_lowerCamelCase = delgramcounter_rep - rgramcounter
_lowerCamelCase = sgramcounter_rep - rgramcounter
_lowerCamelCase = 0
_lowerCamelCase = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCamelCase = 1
if len(lowercase_ ) > 0:
_lowerCamelCase = deltmpscorea / len(lowercase_ )
# ADDITION
_lowerCamelCase = set(lowercase_ ) - set(lowercase_ )
_lowerCamelCase = set(lowercase_ ) & set(lowercase_ )
_lowerCamelCase = set(lowercase_ ) - set(lowercase_ )
_lowerCamelCase = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCamelCase = 1
_lowerCamelCase = 1
if len(lowercase_ ) > 0:
_lowerCamelCase = addtmpscore / len(lowercase_ )
if len(lowercase_ ) > 0:
_lowerCamelCase = addtmpscore / len(lowercase_ )
_lowerCamelCase = 0
if addscore_precision > 0 or addscore_recall > 0:
_lowerCamelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def SARIsent(ssent , csent , rsents ):
_lowerCamelCase = len(lowercase_ )
_lowerCamelCase = ssent.split(''' ''' )
_lowerCamelCase = csent.split(''' ''' )
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
for rsent in rsents:
_lowerCamelCase = rsent.split(''' ''' )
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
ragramslist.append(lowercase_ )
for i in range(0 , len(lowercase_ ) - 1 ):
if i < len(lowercase_ ) - 1:
_lowerCamelCase = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(lowercase_ )
if i < len(lowercase_ ) - 2:
_lowerCamelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(lowercase_ )
if i < len(lowercase_ ) - 3:
_lowerCamelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(lowercase_ )
ragramslist.append(lowercase_ )
ragramslist.append(lowercase_ )
ragramslist.append(lowercase_ )
for i in range(0 , len(lowercase_ ) - 1 ):
if i < len(lowercase_ ) - 1:
_lowerCamelCase = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(lowercase_ )
if i < len(lowercase_ ) - 2:
_lowerCamelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(lowercase_ )
if i < len(lowercase_ ) - 3:
_lowerCamelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(lowercase_ )
for i in range(0 , len(lowercase_ ) - 1 ):
if i < len(lowercase_ ) - 1:
_lowerCamelCase = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(lowercase_ )
if i < len(lowercase_ ) - 2:
_lowerCamelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(lowercase_ )
if i < len(lowercase_ ) - 3:
_lowerCamelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(lowercase_ )
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
_lowerCamelCase = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_lowerCamelCase = sum([delascore, delascore, delascore, delascore] ) / 4
_lowerCamelCase = sum([addascore, addascore, addascore, addascore] ) / 4
_lowerCamelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def normalize(sentence: str , lowercase: bool = True , tokenizer: str = "13a" , return_str: bool = True ):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari(sources , predictions , references ):
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError('''Sources length must match predictions and references lengths.''' )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 1_00 * sari_score
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Any , lowercase_ : List[Any]="exp" , lowercase_ : List[Any]=None , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=False , lowercase_ : List[Any]=False , ) -> Dict:
_lowerCamelCase = len(references[0] )
if any(len(lowercase_ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_lowerCamelCase = [[refs[i] for refs in references] for i in range(lowercase_ )]
_lowerCamelCase = sacrebleu.corpus_bleu(
lowercase_ , lowercase_ , smooth_method=lowercase_ , smooth_value=lowercase_ , force=lowercase_ , lowercase=lowercase_ , use_effective_order=lowercase_ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCamelCase_( datasets.Metric ):
'''simple docstring'''
def snake_case__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def snake_case__ ( self , sources , predictions , references ):
        result = {}
        result.update({'''sari''': compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({'''exact''': compute_em(predictions=predictions , references=references )} )
        return result
| 661 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__SCREAMING_SNAKE_CASE : List[str] = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : str = 'facebook/nllb-200-distilled-600M'
lowercase__ : Optional[int] = (
        'This is a tool that translates text from one language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate, and `tgt_lang`, '
        'which should be the desired output language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\' or \'Albanian\'. It returns the text translated into `tgt_lang`.'
)
lowercase__ : int = 'translator'
lowercase__ : Union[str, Any] = AutoTokenizer
lowercase__ : List[str] = AutoModelForSeqaSeqLM
lowercase__ : List[str] = LANGUAGE_CODES
lowercase__ : Tuple = ['text', 'text', 'text']
lowercase__ : Union[str, Any] = ['text']
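    # tool flow: encode maps the plain-English language names to NLLB codes and tokenizes,
    # forward runs generation, and decode turns the generated ids back into text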
    def encode( self , text , src_lang , tgt_lang ):
        if src_lang not in self.lang_to_code:
            raise ValueError(F"""{src_lang} is not a supported language.""" )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F"""{tgt_lang} is not a supported language.""" )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors='''pt''' , src_lang=src_lang , tgt_lang=tgt_lang )
    def forward( self , inputs ):
        return self.model.generate(**inputs )
    def decode( self , outputs ):
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
| 661 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 661 | 1 |
"""simple docstring"""
def is_even( number : int ) -> bool:
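    """
    Return True when `number` is even, i.e. its lowest bit is unset.

    >>> is_even(2)
    True
    >>> is_even(7)
    False
    >>> is_even(0)
    True
    """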
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 |
"""simple docstring"""
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
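# encrypt: map each character to its Morse token, joined by single spaces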
def encrypt( message : str ) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def decrypt( message : str ) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split() )
def main( ) -> None:
    message = '''Morse code here!'''
    print(message )
    message = encrypt(message )
    print(message )
    message = decrypt(message )
    print(message )
if __name__ == "__main__":
main()
| 661 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly( poly : Sequence[float] , x : float ) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly ) )
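# Horner's rule: a0 + a1*x + ... + an*x**n == a0 + x*(a1 + x*(a2 + ... + x*an)),
# so one pass over the coefficients needs only n multiplications and no exponentiation.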
def horner( poly : Sequence[float] , x : float ) -> float:
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
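    # both strategies print 79800.0 here: 5.0*10**2 + 9.3*10**3 + 7.0*10**4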
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 661 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object
    def Body(*lowercase_ , **lowercase__ ):
        pass
    _serve_dependencies_installed = False
logger = logging.get_logger('''transformers-cli/serving''')
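# factory: build the requested pipeline from the parsed CLI arguments and hand it to ServeCommand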
def serve_command_factory( args : Namespace ):
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    return ServeCommand(nlp , args.host , args.port , args.workers )
class ServeModelInfoResult( BaseModel ):
    '''simple docstring'''
    infos : dict
class ServeTokenizeResult( BaseModel ):
    '''simple docstring'''
    tokens : List[str]
    tokens_ids : Optional[List[int]]
class ServeDeTokenizeResult( BaseModel ):
    '''simple docstring'''
    text : str
class ServeForwardResult( BaseModel ):
    '''simple docstring'''
    output : Any
class ServeCommand( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser ):
        serve_parser = parser.add_parser(
            '''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
        serve_parser.add_argument(
            '''--task''' , type=str , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
        serve_parser.add_argument('''--host''' , type=str , default='''localhost''' , help='''Interface the server will listen on.''' )
        serve_parser.add_argument('''--port''' , type=int , default=8_8_8_8 , help='''Port the serving will listen to.''' )
        serve_parser.add_argument('''--workers''' , type=int , default=1 , help='''Number of http workers''' )
        serve_parser.add_argument('''--model''' , type=str , help='''Model\'s name or path to stored model.''' )
        serve_parser.add_argument('''--config''' , type=str , help='''Model\'s config name or path to stored model.''' )
        serve_parser.add_argument('''--tokenizer''' , type=str , help='''Tokenizer name to use.''' )
        serve_parser.add_argument(
            '''--device''' , type=int , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
        serve_parser.set_defaults(func=serve_command_factory )
    def __init__( self , pipeline , host , port , workers ):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install "transformers[serving]".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(F"""Serving model over {host}:{port}""" )
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '''/''' , self.model_info , response_model=ServeModelInfoResult , response_class=JSONResponse , methods=['''GET'''] , ),
                    APIRoute(
                        '''/tokenize''' , self.tokenize , response_model=ServeTokenizeResult , response_class=JSONResponse , methods=['''POST'''] , ),
                    APIRoute(
                        '''/detokenize''' , self.detokenize , response_model=ServeDeTokenizeResult , response_class=JSONResponse , methods=['''POST'''] , ),
                    APIRoute(
                        '''/forward''' , self.forward , response_model=ServeForwardResult , response_class=JSONResponse , methods=['''POST'''] , ),
                ] , timeout=6_0_0 , )
    def run( self ):
        run(self._app , host=self.host , port=self.port , workers=self.workers )
    def model_info( self ):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
    def tokenize( self , text_input = Body(None , embed=True ) , return_ids = Body(False , embed=True ) ):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input )
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt )
                return ServeTokenizeResult(tokens=tokens_txt , tokens_ids=tokens_ids )
            else:
                return ServeTokenizeResult(tokens=tokens_txt )
        except Exception as e:
            raise HTTPException(status_code=5_0_0 , detail={'''model''': '''''', '''error''': str(e )} )
    def detokenize( self , tokens_ids = Body(None , embed=True ) , skip_special_tokens = Body(False , embed=True ) , cleanup_tokenization_spaces = Body(True , embed=True ) , ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids , skip_special_tokens , cleanup_tokenization_spaces )
            return ServeDeTokenizeResult(model='''''' , text=decoded_str )
        except Exception as e:
            raise HTTPException(status_code=5_0_0 , detail={'''model''': '''''', '''error''': str(e )} )
    async def forward( self , inputs=Body(None , embed=True ) ):
        # Check we don't have empty string
        if len(inputs ) == 0:
            return ServeForwardResult(output=[] , attention=[] )
        try:
            # Forward through the model
            output = self._pipeline(inputs )
            return ServeForwardResult(output=output )
        except Exception as e:
            raise HTTPException(5_0_0 , {'''error''': str(e )} )
| 661 | 1 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def simple_accuracy( preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(simple_accuracy , '''sklearn''' )
    return (preds == labels).mean()
def acc_and_f1( preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(acc_and_f1 , '''sklearn''' )
    acc = simple_accuracy(preds , labels )
    f1 = f1_score(y_true=labels , y_pred=preds )
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman( preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(pearson_and_spearman , '''sklearn''' )
    pearson_corr = pearsonr(preds , labels )[0]
    spearman_corr = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
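# dispatch on the GLUE task name: "cola" is scored with Matthews correlation, "sts-b" with
# Pearson/Spearman correlation, and the remaining classification tasks with accuracy and/or F1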
def glue_compute_metrics( task_name , preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(glue_compute_metrics , '''sklearn''' )
    assert len(preds ) == len(labels ), F"""Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}"""
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "mrpc":
        return acc_and_f1(preds , labels )
    elif task_name == "sts-b":
        return pearson_and_spearman(preds , labels )
    elif task_name == "qqp":
        return acc_and_f1(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def xnli_compute_metrics( task_name , preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(xnli_compute_metrics , '''sklearn''' )
    if len(preds ) != len(labels ):
        raise ValueError(F"""Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}""" )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
| 661 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
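# shared assertions for the tests below: the JSON fixtures decode to 4 rows with columns col_1 / col_2 / col_3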
def _check_json_dataset( dataset , expected_features ):
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str ) -> List[Any]:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCamelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : int ) -> Optional[int]:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCamelCase = features.copy() if features else default_expected_features
_lowerCamelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : List[Any] ) -> Tuple:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
_lowerCamelCase = features.copy() if features else default_expected_features
_lowerCamelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Optional[int] ) -> List[str]:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
_lowerCamelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
_lowerCamelCase = features.copy()
_lowerCamelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ) -> int:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCamelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ , split=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Tuple ) -> Optional[int]:
if issubclass(lowercase_ , lowercase_ ):
_lowerCamelCase = jsonl_path
elif issubclass(lowercase_ , lowercase_ ):
_lowerCamelCase = [jsonl_path]
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCamelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
def _check_json_datasetdict( dataset_dict , expected_features , splits=("train",) ):
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : int ) -> Dict:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCamelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_json_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] ) -> List[Any]:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCamelCase = features.copy() if features else default_expected_features
_lowerCamelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase = JsonDatasetReader({'''train''': jsonl_path} , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_json_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase_( lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : List[str] ) -> Optional[Any]:
if split:
_lowerCamelCase = {split: jsonl_path}
else:
_lowerCamelCase = '''train'''
_lowerCamelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCamelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_json_datasetdict(lowercase_ , lowercase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json( buffer ):
    return json.load(buffer )
def load_json_lines( buffer ):
    return [json.loads(line ) for line in buffer]
class lowerCamelCase_:
'''simple docstring'''
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ ).write()
buffer.seek(0 )
_lowerCamelCase = load_json_function(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert isinstance(exported_content[0] , lowerCamelCase__ )
assert len(lowerCamelCase__ ) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , orient=lowerCamelCase__ ).write()
buffer.seek(0 )
_lowerCamelCase = load_json(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase__ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(lowerCamelCase__ ) == 1_0
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
_lowerCamelCase = load_json_function(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert isinstance(exported_content[0] , lowerCamelCase__ )
assert len(lowerCamelCase__ ) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , orient=lowerCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
_lowerCamelCase = load_json(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase__ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(lowerCamelCase__ ) == 1_0
def snake_case__ ( self , lowerCamelCase__ ):
with pytest.raises(lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = tmp_path_factory.mktemp('''data''' ) / F"""test.json.{extension}"""
_lowerCamelCase = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , compression=lowerCamelCase__ ).write()
with fsspec.open(lowerCamelCase__ , '''rb''' , compression='''infer''' ) as f:
_lowerCamelCase = f.read()
with fsspec.open(lowerCamelCase__ , '''rb''' , compression='''infer''' ) as f:
_lowerCamelCase = f.read()
assert exported_content == original_content
| 661 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power( voltage : float , current : float , power : float ) -> tuple:
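    """
    >>> electric_power(voltage=0, current=2, power=5)
    result(name='voltage', value=2.5)
    >>> electric_power(voltage=2, current=2, power=0)
    result(name='power', value=4.0)
    >>> electric_power(voltage=-2, current=3, power=0)
    result(name='power', value=6.0)
    """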
    result = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase_:
'''simple docstring'''
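    # builds BitConfig objects plus dummy pixel inputs and labels shared by the model/backbone tests below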
def __init__( self , lowerCamelCase__ , lowerCamelCase__=3 , lowerCamelCase__=3_2 , lowerCamelCase__=3 , lowerCamelCase__=1_0 , lowerCamelCase__=[8, 1_6, 3_2, 6_4] , lowerCamelCase__=[1, 1, 2, 1] , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="relu" , lowerCamelCase__=3 , lowerCamelCase__=None , lowerCamelCase__=["stage2", "stage3", "stage4"] , lowerCamelCase__=[2, 3, 4] , lowerCamelCase__=1 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = num_channels
_lowerCamelCase = embeddings_size
_lowerCamelCase = hidden_sizes
_lowerCamelCase = depths
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_act
_lowerCamelCase = num_labels
_lowerCamelCase = scope
_lowerCamelCase = len(lowerCamelCase__ )
_lowerCamelCase = out_features
_lowerCamelCase = out_indices
_lowerCamelCase = num_groups
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = BitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = BitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = BitBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowerCamelCase = None
_lowerCamelCase = BitBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Dict = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowercase__ : Any = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Union[str, Any] = False
lowercase__ : List[Any] = False
lowercase__ : Any = False
lowercase__ : List[str] = False
lowercase__ : Any = False
def snake_case__ ( self ):
_lowerCamelCase = BitModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def snake_case__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(config=lowerCamelCase__ )
for name, module in model.named_modules():
if isinstance(lowerCamelCase__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def snake_case__ ( self ):
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCamelCase = layer_type
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def snake_case__ ( self ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = BitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> List[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def snake_case__ ( self ):
_lowerCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
_lowerCamelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
@require_torch
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (BitBackbone,) if is_torch_available() else ()
lowercase__ : Tuple = BitConfig
lowercase__ : Any = False
def snake_case__ ( self ):
_lowerCamelCase = BitModelTester(self )
| 661 | 1 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase_( ) -> List[str]:
raise RuntimeError('''CUDA out of memory.''' )
class lowerCamelCase_( nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
_lowerCamelCase = nn.Linear(3 , 4 )
_lowerCamelCase = nn.BatchNormad(4 )
_lowerCamelCase = nn.Linear(4 , 5 )
def snake_case__ ( self , lowerCamelCase__ ):
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase__ ) ) )
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(lowerCamelCase__ ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase__ , [1_2_8, 6_4, 3_2, 1_6, 8] )
def snake_case__ ( self ):
_lowerCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(lowerCamelCase__ , lowerCamelCase__ ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
_lowerCamelCase , _lowerCamelCase = mock_training_loop_function('''hello''' )
self.assertListEqual(lowerCamelCase__ , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def snake_case__ ( self ):
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowerCamelCase__ ):
pass
with self.assertRaises(lowerCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self ):
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(lowerCamelCase__ ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self ):
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase__ ) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def snake_case__ ( self ):
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(lowerCamelCase__ ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(lowerCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def snake_case__ ( self ):
_lowerCamelCase = torch.cuda.memory_allocated()
_lowerCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase__ )
_lowerCamelCase = release_memory(lowerCamelCase__ )
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase__ )
| 661 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=2 , lowerCamelCase__=2_4 , lowerCamelCase__=1_6 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=None , lowerCamelCase__=2 , lowerCamelCase__=2 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = patch_size
_lowerCamelCase = max_length
_lowerCamelCase = num_mel_bins
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = scope
_lowerCamelCase = frequency_stride
_lowerCamelCase = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCamelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_lowerCamelCase = (self.max_length - self.patch_size) // self.time_stride + 1
_lowerCamelCase = frequency_out_dimension * time_out_dimension
_lowerCamelCase = num_patches + 2
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, input_values, labels
def snake_case__ ( self ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = ASTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Any = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowercase__ : List[str] = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
lowercase__ : int = False
lowercase__ : str = False
lowercase__ : Union[str, Any] = False
lowercase__ : List[str] = False
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def snake_case__ ( self ):
_lowerCamelCase = ASTModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''input_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@slow
def snake_case__ ( self ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = ASTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> str:
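    # download a short FLAC sample from the Hub and return the (waveform, sampling_rate) pair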
_lowerCamelCase = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
_lowerCamelCase , _lowerCamelCase = torchaudio.load(lowercase_ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def snake_case__ ( self ):
_lowerCamelCase = self.default_feature_extractor
_lowerCamelCase = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(lowerCamelCase__ )
_lowerCamelCase = self.default_feature_extractor
_lowerCamelCase , _lowerCamelCase = prepare_audio()
_lowerCamelCase = audio.squeeze().numpy()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
_lowerCamelCase = torch.Size((1, 5_2_7) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 661 | 1 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = (DPMSolverSDEScheduler,)
lowercase__ : Optional[int] = 10
def snake_case__ ( self , **lowerCamelCase__ ):
_lowerCamelCase = {
'''num_train_timesteps''': 1_1_0_0,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**lowerCamelCase__ )
return config
def snake_case__ ( self ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def snake_case__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowerCamelCase__ , beta_end=lowerCamelCase__ )
def snake_case__ ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def snake_case__ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCamelCase = self.dummy_model()
_lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase = sample.to(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCamelCase = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = output.prev_sample
_lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
_lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
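        # reference sums/means below differ slightly per backend (mps / cuda / cpu kernels)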
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1e-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1e-3
def snake_case__ ( self ):
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
_lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCamelCase = self.dummy_model()
_lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase = sample.to(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCamelCase = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = output.prev_sample
_lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
_lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1e-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1e-3
def snake_case__ ( self ):
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase__ )
_lowerCamelCase = self.dummy_model()
_lowerCamelCase = self.dummy_sample_deter.to(lowerCamelCase__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCamelCase = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = output.prev_sample
_lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
_lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1e-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1e-3
def snake_case__ ( self ):
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**lowerCamelCase__ , use_karras_sigmas=True )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase__ )
_lowerCamelCase = self.dummy_model()
_lowerCamelCase = self.dummy_sample_deter.to(lowerCamelCase__ ) * scheduler.init_noise_sigma
_lowerCamelCase = sample.to(lowerCamelCase__ )
for t in scheduler.timesteps:
_lowerCamelCase = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = output.prev_sample
_lowerCamelCase = torch.sum(torch.abs(lowerCamelCase__ ) )
_lowerCamelCase = torch.mean(torch.abs(lowerCamelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
| 661 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class lowerCamelCase_:
'''simple docstring'''
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
return None
class lowerCamelCase_:
'''simple docstring'''
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
return None
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def snake_case__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase__ , '''tf''' , 1_2 , **lowerCamelCase__ )
@require_torch
@slow
def snake_case__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase__ , '''pt''' , 1_2 , **lowerCamelCase__ )
@require_torch
@slow
def snake_case__ ( self ):
from transformers import BertModel
_lowerCamelCase = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(lowerCamelCase__ ) )
vocab_file.flush()
_lowerCamelCase = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
_lowerCamelCase = BertModel(BertConfig(vocab_size=len(lowerCamelCase__ ) ) )
model.save_pretrained(lowerCamelCase__ )
self._test_export(lowerCamelCase__ , '''pt''' , 1_2 , lowerCamelCase__ )
@require_tf
@slow
def snake_case__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowerCamelCase = self._test_export(lowerCamelCase__ , '''tf''' , 1_2 , **lowerCamelCase__ )
_lowerCamelCase = quantize(Path(lowerCamelCase__ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def snake_case__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowerCamelCase = self._test_export(lowerCamelCase__ , '''pt''' , 1_2 , **lowerCamelCase__ )
_lowerCamelCase = quantize(lowerCamelCase__ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
_lowerCamelCase = Path(lowerCamelCase__ ).joinpath('''model.onnx''' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
return path
except Exception as e:
self.fail(lowerCamelCase__ )
@require_torch
@require_tokenizers
@slow
def snake_case__ ( self ):
from transformers import BertModel
_lowerCamelCase = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
_lowerCamelCase = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(lowerCamelCase__ , lowerCamelCase__ , '''pt''' )
@require_tf
@require_tokenizers
@slow
def snake_case__ ( self ):
from transformers import TFBertModel
_lowerCamelCase = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
_lowerCamelCase = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(lowerCamelCase__ , lowerCamelCase__ , '''tf''' )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = FeatureExtractionPipeline(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
input_vars , output_vars , shapes , tokens = infer_shapes(lowerCamelCase__ , lowerCamelCase__ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , lowerCamelCase__ )
self.assertSequenceEqual(variable_names[3:] , lowerCamelCase__ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def snake_case__ ( self ):
_lowerCamelCase = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
_lowerCamelCase = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() , lowerCamelCase__ , lowerCamelCase__ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase__ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase__ ) , set(lowerCamelCase__ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase__ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() , lowerCamelCase__ , lowerCamelCase__ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCamelCase__ ) , 1 )
self.assertEqual(len(lowerCamelCase__ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def snake_case__ ( self ):
_lowerCamelCase = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
| 661 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Dict = 'wavlm'
def __init__( self , lowerCamelCase__=3_2 , lowerCamelCase__=7_6_8 , lowerCamelCase__=1_2 , lowerCamelCase__=1_2 , lowerCamelCase__=3_0_7_2 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0_2 , lowerCamelCase__=1e-5 , lowerCamelCase__="group" , lowerCamelCase__="gelu" , lowerCamelCase__=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCamelCase__=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase__=(1_0, 3, 3, 3, 3, 2, 2) , lowerCamelCase__=False , lowerCamelCase__=1_2_8 , lowerCamelCase__=1_6 , lowerCamelCase__=3_2_0 , lowerCamelCase__=8_0_0 , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=0.0_5 , lowerCamelCase__=1_0 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=1_0 , lowerCamelCase__=3_2_0 , lowerCamelCase__=2 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0_0 , lowerCamelCase__=2_5_6 , lowerCamelCase__=2_5_6 , lowerCamelCase__=0.1 , lowerCamelCase__="mean" , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=2_5_6 , lowerCamelCase__=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowerCamelCase__=(5, 3, 3, 1, 1) , lowerCamelCase__=(1, 2, 3, 1, 1) , lowerCamelCase__=5_1_2 , lowerCamelCase__=8_0 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , lowerCamelCase__=False , lowerCamelCase__=3 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=None , **lowerCamelCase__ , ):
super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
_lowerCamelCase = hidden_size
_lowerCamelCase = feat_extract_norm
_lowerCamelCase = feat_extract_activation
_lowerCamelCase = list(lowerCamelCase__ )
_lowerCamelCase = list(lowerCamelCase__ )
_lowerCamelCase = list(lowerCamelCase__ )
_lowerCamelCase = conv_bias
_lowerCamelCase = num_buckets
_lowerCamelCase = max_bucket_distance
_lowerCamelCase = num_conv_pos_embeddings
_lowerCamelCase = num_conv_pos_embedding_groups
_lowerCamelCase = len(self.conv_dim )
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = num_attention_heads
_lowerCamelCase = hidden_dropout
_lowerCamelCase = attention_dropout
_lowerCamelCase = activation_dropout
_lowerCamelCase = feat_proj_dropout
_lowerCamelCase = final_dropout
_lowerCamelCase = layerdrop
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = initializer_range
_lowerCamelCase = num_ctc_classes
_lowerCamelCase = vocab_size
_lowerCamelCase = do_stable_layer_norm
_lowerCamelCase = use_weighted_layer_sum
_lowerCamelCase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase = apply_spec_augment
_lowerCamelCase = mask_time_prob
_lowerCamelCase = mask_time_length
_lowerCamelCase = mask_time_min_masks
_lowerCamelCase = mask_feature_prob
_lowerCamelCase = mask_feature_length
# parameters for pretraining with codevector quantized representations
_lowerCamelCase = num_codevectors_per_group
_lowerCamelCase = num_codevector_groups
_lowerCamelCase = contrastive_logits_temperature
_lowerCamelCase = num_negatives
_lowerCamelCase = codevector_dim
_lowerCamelCase = proj_codevector_dim
_lowerCamelCase = diversity_loss_weight
# ctc loss
_lowerCamelCase = ctc_loss_reduction
_lowerCamelCase = ctc_zero_infinity
# adapter
_lowerCamelCase = add_adapter
_lowerCamelCase = adapter_kernel_size
_lowerCamelCase = adapter_stride
_lowerCamelCase = num_adapter_layers
_lowerCamelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCamelCase = list(lowerCamelCase__ )
_lowerCamelCase = list(lowerCamelCase__ )
_lowerCamelCase = list(lowerCamelCase__ )
_lowerCamelCase = xvector_output_dim
@property
def snake_case__ ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 661 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = {str(digit): digit**5 for digit in range(1_0)}
def lowerCAmelCase_( lowercase_ : int ) -> int:
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(lowercase_ ) )
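# For example, 4150 == 4**5 + 1**5 + 5**5 + 0**5, so digits_fifth_powers_sum(4150) == 4150
# and 4150 is one of the numbers the solution below collects.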
def lowerCAmelCase_( ) -> int:
return sum(
number
for number in range(10_00 , 1_00_00_00 )
if number == digits_fifth_powers_sum(lowercase_ ) )
if __name__ == "__main__":
print(solution())
| 661 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase_( lowercase_ : List[str] ) -> Dict:
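# Keep only parameters with requires_grad=True, then sum their element counts.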
_lowerCamelCase = filter(lambda p : p.requires_grad , model.parameters() )
_lowerCamelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__)
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : Tuple ) -> List[str]:
if metric == "rouge2":
_lowerCamelCase = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
_lowerCamelCase = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
_lowerCamelCase = '''{val_avg_em:.4f}-{step_count}'''
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
''' function.''' )
_lowerCamelCase = ModelCheckpoint(
dirpath=lowercase_ , filename=lowercase_ , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Tuple ) -> List[str]:
return EarlyStopping(
monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=lowercase_ , verbose=lowercase_ , )
class lowerCamelCase_( pl.Callback ):
'''simple docstring'''
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowerCamelCase__ )
@rank_zero_only
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True ):
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
_lowerCamelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
_lowerCamelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
_lowerCamelCase = od / '''test_results.txt'''
_lowerCamelCase = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_lowerCamelCase = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
_lowerCamelCase = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=lowerCamelCase__ )
generations_file.parent.mkdir(exist_ok=lowerCamelCase__ )
with open(lowerCamelCase__ , '''a+''' ) as writer:
for key in sorted(lowerCamelCase__ ):
if key in ["log", "progress_bar", "preds"]:
continue
_lowerCamelCase = metrics[key]
if isinstance(lowerCamelCase__ , torch.Tensor ):
_lowerCamelCase = val.item()
_lowerCamelCase = F"""{key}: {val:.6f}\n"""
writer.write(lowerCamelCase__ )
if not save_generations:
return
if "preds" in metrics:
_lowerCamelCase = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(lowerCamelCase__ )
@rank_zero_only
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
try:
_lowerCamelCase = pl_module.model.model.num_parameters()
except AttributeError:
_lowerCamelCase = pl_module.model.num_parameters()
_lowerCamelCase = count_trainable_parameters(lowerCamelCase__ )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowerCamelCase__ , lowerCamelCase__ , '''test''' )
@rank_zero_only
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 661 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
__SCREAMING_SNAKE_CASE : str = tuple[int, int]
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = vertices
_lowerCamelCase = {
(min(lowerCamelCase__ ), max(lowerCamelCase__ )): weight for edge, weight in edges.items()
}
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
_lowerCamelCase = weight
def snake_case__ ( self ):
_lowerCamelCase = Graph({min(self.vertices )} , {} )
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
while len(subgraph.vertices ) < len(self.vertices ):
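# Prim's greedy step: among all edges with exactly one endpoint inside the current
# subgraph (the XOR test below selects the cut), add the cheapest one.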
_lowerCamelCase = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
_lowerCamelCase = edge
_lowerCamelCase = weight
subgraph.add_edge(lowerCamelCase__ , lowerCamelCase__ )
return subgraph
def lowerCAmelCase_( lowercase_ : str = "p107_network.txt" ) -> int:
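# Project Euler 107: the maximum saving is the total weight of all edges in the
# network minus the weight of a minimum spanning tree.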
_lowerCamelCase = os.path.abspath(os.path.dirname(lowercase_ ) )
_lowerCamelCase = os.path.join(lowercase_ , lowercase_ )
_lowerCamelCase = {}
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
with open(lowercase_ ) as f:
_lowerCamelCase = f.read().strip().split('''\n''' )
_lowerCamelCase = [line.split(''',''' ) for line in data]
for edge_a in range(1 , len(adjacency_matrix ) ):
for edge_b in range(edge_a ):
if adjacency_matrix[edge_a][edge_b] != "-":
edges[(edge_b, edge_a)] = int(adjacency_matrix[edge_a][edge_b] )
_lowerCamelCase = Graph(set(range(len(adjacency_matrix ) ) ) , edges )
_lowerCamelCase = graph.prims_algorithm()
_lowerCamelCase = sum(graph.edges.values() )
_lowerCamelCase = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 661 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lowerCAmelCase_( lowercase_ : list[float] ) -> np.ndarray:
return np.maximum(0 , lowercase_ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 661 |
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCAmelCase_( lowercase_ : int = 2_00_00_00 ) -> int:
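# An a x b grid contains T(a) * T(b) rectangles, where T(n) is the n-th triangle
# number; the search below finds the grid area whose rectangle count is closest
# to the target (Project Euler 85).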
_lowerCamelCase = [0]
_lowerCamelCase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
_lowerCamelCase = 0
# the area corresponding to the grid that gives the product closest to target
_lowerCamelCase = 0
# an estimate of b, using the quadratic formula
_lowerCamelCase = 42
# the largest integer less than b_estimate
_lowerCamelCase = 42
# the largest integer less than b_estimate
_lowerCamelCase = 42
# the triangle number corresponding to b_floor
_lowerCamelCase = 42
# the triangle number corresponding to b_ceil
_lowerCamelCase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
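# Solve T(b) * T(a) = target for b with the quadratic formula, using T(b) = b * (b + 1) / 2.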
_lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
_lowerCamelCase = floor(lowercase_ )
_lowerCamelCase = ceil(lowercase_ )
_lowerCamelCase = triangle_numbers[b_floor]
_lowerCamelCase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
_lowerCamelCase = triangle_b_first_guess * triangle_a
_lowerCamelCase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
_lowerCamelCase = triangle_b_second_guess * triangle_a
_lowerCamelCase = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F"""{solution() = }""")
| 661 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
__SCREAMING_SNAKE_CASE : str = tuple[int, int]
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = vertices
_lowerCamelCase = {
(min(lowerCamelCase__ ), max(lowerCamelCase__ )): weight for edge, weight in edges.items()
}
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
_lowerCamelCase = weight
def snake_case__ ( self ):
_lowerCamelCase = Graph({min(self.vertices )} , {} )
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
while len(subgraph.vertices ) < len(self.vertices ):
_lowerCamelCase = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
_lowerCamelCase = edge
_lowerCamelCase = weight
subgraph.add_edge(lowerCamelCase__ , lowerCamelCase__ )
return subgraph
def lowerCAmelCase_( lowercase_ : str = "p107_network.txt" ) -> int:
_lowerCamelCase = os.path.abspath(os.path.dirname(lowercase_ ) )
_lowerCamelCase = os.path.join(lowercase_ , lowercase_ )
_lowerCamelCase = {}
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
with open(lowercase_ ) as f:
_lowerCamelCase = f.read().strip().split('''\n''' )
_lowerCamelCase = [line.split(''',''' ) for line in data]
for edge_a in range(1 , len(adjacency_matrix ) ):
for edge_b in range(edge_a ):
if adjacency_matrix[edge_a][edge_b] != "-":
edges[(edge_b, edge_a)] = int(adjacency_matrix[edge_a][edge_b] )
_lowerCamelCase = Graph(set(range(len(adjacency_matrix ) ) ) , edges )
_lowerCamelCase = graph.prims_algorithm()
_lowerCamelCase = sum(graph.edges.values() )
_lowerCamelCase = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 661 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''' , FutureWarning , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 661 | 1 |
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__SCREAMING_SNAKE_CASE : Dict = True
except ImportError:
__SCREAMING_SNAKE_CASE : Any = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCAmelCase_( lowercase_ : Namespace ) -> Optional[Any]:
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowerCamelCase_( A__ ):
'''simple docstring'''
@staticmethod
def snake_case__ ( lowerCamelCase__ ):
_lowerCamelCase = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=lowerCamelCase__ , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=lowerCamelCase__ , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , *lowerCamelCase__ ):
_lowerCamelCase = testing
_lowerCamelCase = testing_file
_lowerCamelCase = path
def snake_case__ ( self ):
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
_lowerCamelCase = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:2_2]]
if len(lowerCamelCase__ ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
_lowerCamelCase = (
Path(lowerCamelCase__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
_lowerCamelCase = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowerCamelCase__ ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
_lowerCamelCase = json.load(lowerCamelCase__ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowerCamelCase__ , extra_context=lowerCamelCase__ , )
_lowerCamelCase = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:2_2]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
_lowerCamelCase = json.load(lowerCamelCase__ )
_lowerCamelCase = configuration['''lowercase_modelname''']
_lowerCamelCase = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(F"""{directory}/configuration.json""" )
_lowerCamelCase = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
_lowerCamelCase = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
_lowerCamelCase = '''Flax''' in generate_tensorflow_pytorch_and_flax
_lowerCamelCase = F"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
os.makedirs(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=lowerCamelCase__ )
# Tests require submodules as they have parent imports
with open(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ):
pass
shutil.move(
F"""{directory}/__init__.py""" , F"""{model_dir}/__init__.py""" , )
shutil.move(
F"""{directory}/configuration_{lowercase_model_name}.py""" , F"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(lowerCamelCase__ ):
with open(lowerCamelCase__ , '''r''' ) as f:
_lowerCamelCase = f.readlines()
with open(lowerCamelCase__ , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowerCamelCase__ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_tf_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_flax_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/{lowercase_model_name}.md""" , F"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
F"""{directory}/tokenization_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# Create temp file
fh , abs_path = mkstemp()
_lowerCamelCase = False
with fdopen(lowerCamelCase__ , '''w''' ) as new_file:
with open(lowerCamelCase__ ) as old_file:
for line in old_file:
new_file.write(lowerCamelCase__ )
if line_to_copy_below in line:
_lowerCamelCase = True
for line_to_copy in lines_to_copy:
new_file.write(lowerCamelCase__ )
if not line_found:
raise ValueError(F"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(lowerCamelCase__ , lowerCamelCase__ )
# Remove original file
remove(lowerCamelCase__ )
# Move new file
move(lowerCamelCase__ , lowerCamelCase__ )
def skip_units(lowerCamelCase__ ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowerCamelCase__ ):
with open(lowerCamelCase__ ) as datafile:
_lowerCamelCase = []
_lowerCamelCase = False
_lowerCamelCase = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
_lowerCamelCase = line.split('''"''' )[1]
_lowerCamelCase = skip_units(lowerCamelCase__ )
elif "# Below: " in line and "##" not in line:
_lowerCamelCase = line.split('''"''' )[1]
_lowerCamelCase = skip_units(lowerCamelCase__ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = []
elif "# Replace with" in line and "##" not in line:
_lowerCamelCase = []
elif "##" not in line:
lines_to_copy.append(lowerCamelCase__ )
remove(lowerCamelCase__ )
replace_in_files(F"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(lowerCamelCase__ )
| 661 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
__SCREAMING_SNAKE_CASE : str = {
'''distilbert-base-uncased''': 5_1_2,
'''distilbert-base-uncased-distilled-squad''': 5_1_2,
'''distilbert-base-cased''': 5_1_2,
'''distilbert-base-cased-distilled-squad''': 5_1_2,
'''distilbert-base-german-cased''': 5_1_2,
'''distilbert-base-multilingual-cased''': 5_1_2,
}
__SCREAMING_SNAKE_CASE : str = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : List[Any] = VOCAB_FILES_NAMES
lowercase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : str = PRETRAINED_INIT_CONFIGURATION
lowercase__ : int = ['input_ids', 'attention_mask']
lowercase__ : Tuple = DistilBertTokenizer
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__="[UNK]" , lowerCamelCase__="[SEP]" , lowerCamelCase__="[PAD]" , lowerCamelCase__="[CLS]" , lowerCamelCase__="[MASK]" , lowerCamelCase__=True , lowerCamelCase__=None , **lowerCamelCase__ , ):
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , )
_lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCamelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCamelCase__ ) != tokenize_chinese_chars
):
_lowerCamelCase = getattr(lowerCamelCase__ , normalizer_state.pop('''type''' ) )
_lowerCamelCase = do_lower_case
_lowerCamelCase = strip_accents
_lowerCamelCase = tokenize_chinese_chars
_lowerCamelCase = normalizer_class(**lowerCamelCase__ )
_lowerCamelCase = do_lower_case
def snake_case__ ( self , token_ids_a , token_ids_b=None ):
output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_b:
output += token_ids_b + [self.sep_token_id]
return output
def snake_case__ ( self , token_ids_a , token_ids_b = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
| 661 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Tuple = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 661 |
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__SCREAMING_SNAKE_CASE : str = 2_9_9_7_9_2_4_5_8
# Symbols
ct , x , y , z = symbols('''ct x y z''')
def lowerCAmelCase_( lowercase_ : float ) -> float:
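# beta = v / c: the velocity expressed as a fraction of the speed of light.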
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def lowerCAmelCase_( lowercase_ : float ) -> float:
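# Lorentz factor: gamma = 1 / sqrt(1 - beta**2).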
return 1 / sqrt(1 - beta(lowercase_ ) ** 2 )
def lowerCAmelCase_( lowercase_ : float ) -> np.ndarray:
return np.array(
[
[gamma(lowercase_ ), -gamma(lowercase_ ) * beta(lowercase_ ), 0, 0],
[-gamma(lowercase_ ) * beta(lowercase_ ), gamma(lowercase_ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowerCAmelCase_( lowercase_ : float , lowercase_ : np.ndarray | None = None ) -> np.ndarray:
# Ensure event is not empty
if event is None:
_lowerCamelCase = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(lowercase_ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__SCREAMING_SNAKE_CASE : List[str] = transform(2_9_9_7_9_2_4_5)
print('''Example of four vector: ''')
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
__SCREAMING_SNAKE_CASE : Tuple = {ct: c, x: 1, y: 1, z: 1}
__SCREAMING_SNAKE_CASE : int = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 661 | 1 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
__SCREAMING_SNAKE_CASE : str = '''src/transformers'''
# Matches is_xxx_available()
__SCREAMING_SNAKE_CASE : Any = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
__SCREAMING_SNAKE_CASE : List[Any] = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
__SCREAMING_SNAKE_CASE : int = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
__SCREAMING_SNAKE_CASE : List[Any] = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__SCREAMING_SNAKE_CASE : Optional[int] = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
__SCREAMING_SNAKE_CASE : str = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
__SCREAMING_SNAKE_CASE : Optional[int] = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
__SCREAMING_SNAKE_CASE : List[Any] = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
__SCREAMING_SNAKE_CASE : List[str] = re.compile(R'''^\s*try:''')
# Catches a line with else:
__SCREAMING_SNAKE_CASE : List[str] = re.compile(R'''^\s*else:''')
def lowerCAmelCase_( lowercase_ : List[Any] ) -> Optional[int]:
if _re_test_backend.search(lowercase_ ) is None:
return None
_lowerCamelCase = [b[0] for b in _re_backend.findall(lowercase_ )]
backends.sort()
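# e.g. ["torch", "tf"] sorts to ["tf", "torch"] and is joined into the key "tf_and_torch"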
return "_and_".join(lowercase_ )
def lowerCAmelCase_( lowercase_ : List[str] ) -> int:
with open(lowercase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_lowerCamelCase = f.readlines()
_lowerCamelCase = 0
while line_index < len(lowercase_ ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowercase_ ):
return None
# First grab the objects without a specific backend in _import_structure
_lowerCamelCase = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
_lowerCamelCase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowercase_ ):
_lowerCamelCase = _re_one_line_import_struct.search(lowercase_ ).groups()[0]
_lowerCamelCase = re.findall(R'''\[([^\]]+)\]''' , lowercase_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
_lowerCamelCase = _re_import_struct_key_value.search(lowercase_ )
if single_line_import_search is not None:
_lowerCamelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(lowercase_ ) > 0]
objects.extend(lowercase_ )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
_lowerCamelCase = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
_lowerCamelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCamelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCamelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
_lowerCamelCase = lines[line_index]
if _re_import_struct_add_one.search(lowercase_ ) is not None:
objects.append(_re_import_struct_add_one.search(lowercase_ ).groups()[0] )
elif _re_import_struct_add_many.search(lowercase_ ) is not None:
_lowerCamelCase = _re_import_struct_add_many.search(lowercase_ ).groups()[0].split(''', ''' )
_lowerCamelCase = [obj[1:-1] for obj in imports if len(lowercase_ ) > 0]
objects.extend(lowercase_ )
elif _re_between_brackets.search(lowercase_ ) is not None:
_lowerCamelCase = _re_between_brackets.search(lowercase_ ).groups()[0].split(''', ''' )
_lowerCamelCase = [obj[1:-1] for obj in imports if len(lowercase_ ) > 0]
objects.extend(lowercase_ )
elif _re_quote_object.search(lowercase_ ) is not None:
objects.append(_re_quote_object.search(lowercase_ ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
_lowerCamelCase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_lowerCamelCase = []
while (
line_index < len(lowercase_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
_lowerCamelCase = lines[line_index]
_lowerCamelCase = _re_import.search(lowercase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
_lowerCamelCase = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(lowercase_ ):
# If the line is an if is_backend_available, we grab all objects associated.
_lowerCamelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCamelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCamelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
_lowerCamelCase = lines[line_index]
_lowerCamelCase = _re_import.search(lowercase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
_lowerCamelCase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowerCAmelCase_( lowercase_ : Union[str, Any] , lowercase_ : str ) -> Tuple:
def find_duplicates(lowercase_ : str ):
return [k for k, v in collections.Counter(lowercase_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_lowerCamelCase = []
for key in import_dict_objects.keys():
_lowerCamelCase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
_lowerCamelCase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_lowerCamelCase = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def lowerCAmelCase_( ) -> Optional[int]:
_lowerCamelCase = []
for root, _, files in os.walk(lowercase_ ):
if "__init__.py" in files:
_lowerCamelCase = os.path.join(lowercase_ , '''__init__.py''' )
_lowerCamelCase = parse_init(lowercase_ )
if objects is not None:
_lowerCamelCase = analyze_results(*lowercase_ )
if len(lowercase_ ) > 0:
_lowerCamelCase = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(lowercase_ ) )
if len(lowercase_ ) > 0:
raise ValueError('''\n\n'''.join(lowercase_ ) )
def lowerCAmelCase_( ) -> Any:
_lowerCamelCase = []
for path, directories, files in os.walk(lowercase_ ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(lowercase_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowercase_ ) / folder).glob('''*.py''' ) ) ) == 0:
continue
_lowerCamelCase = str((Path(lowercase_ ) / folder).relative_to(lowercase_ ) )
_lowerCamelCase = short_path.replace(os.path.sep , '''.''' )
submodules.append(lowercase_ )
for fname in files:
if fname == "__init__.py":
continue
_lowerCamelCase = str((Path(lowercase_ ) / fname).relative_to(lowercase_ ) )
_lowerCamelCase = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(lowercase_ )
return submodules
__SCREAMING_SNAKE_CASE : Tuple = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def lowerCAmelCase_( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
_lowerCamelCase = importlib.util.spec_from_file_location(
'''transformers''' , os.path.join(lowercase_ , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
_lowerCamelCase = spec.loader.load_module()
_lowerCamelCase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(lowercase_ ) > 0:
_lowerCamelCase = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 661 |
"""simple docstring"""
from __future__ import annotations
__SCREAMING_SNAKE_CASE : Dict = 1.6_0_2_1e-1_9 # units = C
def lowerCAmelCase_( lowercase_ : float , lowercase_ : float , lowercase_ : float , ) -> tuple[str, float]:
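# Uses sigma = q * n * mu (conductivity = charge * electron concentration * mobility):
# exactly one of the three inputs must be zero, and that unknown is solved for.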
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def lowerCAmelCase_( lowercase_ : int , lowercase_ : int , lowercase_ : bool , lowercase_ : list[int] , lowercase_ : float ) -> int:
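# Plain minimax over an implicit complete binary tree: scores holds the leaf values
# and the children of node_index live at 2 * node_index and 2 * node_index + 1.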
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if len(lowercase_ ) == 0:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , lowercase_ , lowercase_ , lowercase_ ) , minimax(depth + 1 , node_index * 2 + 1 , lowercase_ , lowercase_ , lowercase_ ) , )
return min(
minimax(depth + 1 , node_index * 2 , lowercase_ , lowercase_ , lowercase_ ) , minimax(depth + 1 , node_index * 2 + 1 , lowercase_ , lowercase_ , lowercase_ ) , )
def lowerCAmelCase_( ) -> None:
_lowerCamelCase = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
_lowerCamelCase = math.log(len(lowercase_ ) , 2 )
print('''Optimal value : ''' , end='''''' )
print(minimax(0 , 0 , lowercase_ , lowercase_ , lowercase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 661 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6},
}
}
_lowerCamelCase = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_2_8,
'''task_specific_params.summarization.min_length''': 1_2,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_4_2,
'''task_specific_params.summarization_cnn.min_length''': 5_6,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 6_2,
'''task_specific_params.summarization_xsum.min_length''': 1_1,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(lowerCamelCase__ ) , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , x.transpose() ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , transpose(lowerCamelCase__ ).numpy() ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
_lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , transpose(lowerCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , transpose(lowerCamelCase__ ).numpy() ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , transpose(lowerCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , np.asarray(transpose(lowerCamelCase__ ) ) ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
_lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , np.asarray(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) ) ) )
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , np.reshape(lowerCamelCase__ , (4, 3) ) ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (1_2, 5) ) , np.reshape(lowerCamelCase__ , (1_2, 5) ) ) )
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , reshape(lowerCamelCase__ , (4, 3) ).numpy() ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
_lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (1_2, 5) ) , reshape(lowerCamelCase__ , (1_2, 5) ).numpy() ) )
@require_tf
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , reshape(lowerCamelCase__ , (4, 3) ).numpy() ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (1_2, 5) ) , reshape(lowerCamelCase__ , (1_2, 5) ).numpy() ) )
@require_flax
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , np.asarray(reshape(lowerCamelCase__ , (4, 3) ) ) ) )
_lowerCamelCase = np.random.randn(3 , 4 , 5 )
_lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (1_2, 5) ) , np.asarray(reshape(lowerCamelCase__ , (1_2, 5) ) ) ) )
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , np.squeeze(lowerCamelCase__ ) ) )
_lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , np.squeeze(lowerCamelCase__ , axis=2 ) ) )
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(1 , 3 , 4 )
_lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , squeeze(lowerCamelCase__ ).numpy() ) )
_lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
_lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , squeeze(lowerCamelCase__ , axis=2 ).numpy() ) )
@require_tf
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(1 , 3 , 4 )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , squeeze(lowerCamelCase__ ).numpy() ) )
_lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , squeeze(lowerCamelCase__ , axis=2 ).numpy() ) )
@require_flax
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(1 , 3 , 4 )
_lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , np.asarray(squeeze(lowerCamelCase__ ) ) ) )
_lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
_lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , np.asarray(squeeze(lowerCamelCase__ , axis=2 ) ) ) )
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , np.expand_dims(lowerCamelCase__ , axis=1 ) ) )
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , expand_dims(lowerCamelCase__ , axis=1 ).numpy() ) )
@require_tf
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , expand_dims(lowerCamelCase__ , axis=1 ).numpy() ) )
@require_flax
def snake_case__ ( self ):
_lowerCamelCase = np.random.randn(3 , 4 )
_lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , np.asarray(expand_dims(lowerCamelCase__ , axis=1 ) ) ) )
| 661 | 1 |
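All of the tests above exercise one dispatch pattern: a numpy-style op that accepts whichever tensor type it is handed and routes to the matching backend. A minimal sketch of that idea, with only the numpy branch implemented and the other backends noted in comments — this shows the shape of the real transformers.utils dispatch, not a copy of it:

import numpy as np

def transpose_any(array, axes=None):
    # numpy arrays go straight to np.transpose
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    # torch would use array.T or array.permute(*axes), tf uses
    # tf.transpose(array, perm=axes), jax uses jnp.transpose(array, axes=axes);
    # the real helper picks a branch via runtime type checks.
    raise ValueError(f"Type not supported for transpose: {type(array)}.")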
"""simple docstring"""
import torch
from torch import nn
class lowerCamelCase_( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1 , lowerCamelCase__=False ):
super().__init__()
_lowerCamelCase = n_token
_lowerCamelCase = d_embed
_lowerCamelCase = d_proj
_lowerCamelCase = cutoffs + [n_token]
_lowerCamelCase = [0] + self.cutoffs
_lowerCamelCase = div_val
_lowerCamelCase = self.cutoffs[0]
_lowerCamelCase = len(self.cutoffs ) - 1
_lowerCamelCase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
_lowerCamelCase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
_lowerCamelCase = nn.Parameter(torch.zeros(self.n_clusters ) )
_lowerCamelCase = nn.ModuleList()
_lowerCamelCase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCamelCase__ , lowerCamelCase__ ) ) )
else:
self.out_projs.append(lowerCamelCase__ )
self.out_layers.append(nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) )
else:
for i in range(len(self.cutoffs ) ):
_lowerCamelCase , _lowerCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_lowerCamelCase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCamelCase__ , lowerCamelCase__ ) ) )
self.out_layers.append(nn.Linear(lowerCamelCase__ , r_idx - l_idx ) )
_lowerCamelCase = keep_order
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if proj is None:
_lowerCamelCase = nn.functional.linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
_lowerCamelCase = nn.functional.linear(lowerCamelCase__ , proj.t().contiguous() )
_lowerCamelCase = nn.functional.linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=False ):
if labels is not None:
# Shift so that tokens < n predict n
_lowerCamelCase = hidden[..., :-1, :].contiguous()
_lowerCamelCase = labels[..., 1:].contiguous()
_lowerCamelCase = hidden.view(-1 , hidden.size(-1 ) )
_lowerCamelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
_lowerCamelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
_lowerCamelCase = self._compute_logit(lowerCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
_lowerCamelCase = labels != -1_0_0
_lowerCamelCase = torch.zeros_like(lowerCamelCase__ , dtype=hidden.dtype , device=hidden.device )
_lowerCamelCase = (
-nn.functional.log_softmax(lowerCamelCase__ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
_lowerCamelCase = nn.functional.log_softmax(lowerCamelCase__ , dim=-1 )
else:
# construct weights and biases
_lowerCamelCase , _lowerCamelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_lowerCamelCase , _lowerCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_lowerCamelCase = self.out_layers[0].weight[l_idx:r_idx]
_lowerCamelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_lowerCamelCase = self.out_layers[i].weight
_lowerCamelCase = self.out_layers[i].bias
if i == 0:
_lowerCamelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_lowerCamelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCamelCase__ )
biases.append(lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = weights[0], biases[0], self.out_projs[0]
_lowerCamelCase = self._compute_logit(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = nn.functional.log_softmax(lowerCamelCase__ , dim=1 )
if labels is None:
_lowerCamelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
_lowerCamelCase = torch.zeros_like(lowerCamelCase__ , dtype=hidden.dtype , device=hidden.device )
_lowerCamelCase = 0
_lowerCamelCase = [0] + self.cutoffs
for i in range(len(lowerCamelCase__ ) - 1 ):
_lowerCamelCase , _lowerCamelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_lowerCamelCase = (labels >= l_idx) & (labels < r_idx)
_lowerCamelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_lowerCamelCase = labels.index_select(0 , lowerCamelCase__ ) - l_idx
_lowerCamelCase = head_logprob.index_select(0 , lowerCamelCase__ )
_lowerCamelCase = hidden.index_select(0 , lowerCamelCase__ )
else:
_lowerCamelCase = hidden
if i == 0:
if labels is not None:
_lowerCamelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
_lowerCamelCase = head_logprob[:, : self.cutoffs[0]]
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = weights[i], biases[i], self.out_projs[i]
_lowerCamelCase = self._compute_logit(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = nn.functional.log_softmax(lowerCamelCase__ , dim=1 )
_lowerCamelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_lowerCamelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
_lowerCamelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_lowerCamelCase = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCamelCase__ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def snake_case__ ( self , lowerCamelCase__ ):
if self.n_clusters == 0:
_lowerCamelCase = self._compute_logit(lowerCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(lowerCamelCase__ , dim=-1 )
else:
# construct weights and biases
_lowerCamelCase , _lowerCamelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_lowerCamelCase , _lowerCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_lowerCamelCase = self.out_layers[0].weight[l_idx:r_idx]
_lowerCamelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_lowerCamelCase = self.out_layers[i].weight
_lowerCamelCase = self.out_layers[i].bias
if i == 0:
_lowerCamelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_lowerCamelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCamelCase__ )
biases.append(lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = weights[0], biases[0], self.out_projs[0]
_lowerCamelCase = self._compute_logit(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
_lowerCamelCase = nn.functional.log_softmax(lowerCamelCase__ , dim=1 )
_lowerCamelCase = [0] + self.cutoffs
for i in range(len(lowerCamelCase__ ) - 1 ):
_lowerCamelCase , _lowerCamelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_lowerCamelCase = head_logprob[:, : self.cutoffs[0]]
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = weights[i], biases[i], self.out_projs[i]
_lowerCamelCase = self._compute_logit(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = nn.functional.log_softmax(lowerCamelCase__ , dim=1 )
_lowerCamelCase = head_logprob[:, -i] + tail_logprob_i
_lowerCamelCase = logprob_i
return out
| 661 |
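The module above is recognizably Transformer-XL's projected adaptive log-softmax, although its class name is mangled in this row. A hedged usage sketch under that reading — sizes are illustrative, the positional constructor arguments follow the `__init__` above (n_token, d_embed, d_proj, cutoffs), and since the row's code keeps its mangled parameter names this is strictly a sketch:

import torch

AdaptiveHead = lowerCamelCase_  # the mangled class name defined above
head = AdaptiveHead(10_000, 128, 128, [1_000, 5_000])  # 10k vocab, two tail clusters
hidden = torch.randn(2, 16, 128)                       # (batch, seq, d_proj)
labels = torch.randint(0, 10_000, (2, 16))             # next-token targets
nll = head(hidden, labels)  # shifted per-position NLL, shape (batch * (seq - 1),)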
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__SCREAMING_SNAKE_CASE : Tuple = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__SCREAMING_SNAKE_CASE : str = {'''facebook/blenderbot-3B''': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
    """Return the byte-to-unicode map used by byte-level BPE; every byte value gets a printable character."""
    bs = (
        list(range(ord('''!'''), ord('''~''') + 1)) + list(range(ord('''¡'''), ord('''¬''') + 1)) + list(range(ord('''®'''), ord('''ÿ''') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase__ : int = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Tuple = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="replace" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=False , **lowerCamelCase__ , ):
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
with open(lowerCamelCase__ , encoding='''utf-8''' ) as vocab_handle:
_lowerCamelCase = json.load(lowerCamelCase__ )
_lowerCamelCase = {v: k for k, v in self.encoder.items()}
_lowerCamelCase = errors # how to handle errors in decoding
_lowerCamelCase = bytes_to_unicode()
_lowerCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase__ , encoding='''utf-8''' ) as merges_handle:
_lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1]
_lowerCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
_lowerCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_lowerCamelCase = {}
_lowerCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCamelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def snake_case__ ( self ):
return len(self.encoder )
def snake_case__ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case__ ( self , lowerCamelCase__ ):
if token in self.cache:
return self.cache[token]
_lowerCamelCase = tuple(lowerCamelCase__ )
_lowerCamelCase = get_pairs(lowerCamelCase__ )
if not pairs:
return token
while True:
_lowerCamelCase = min(lowerCamelCase__ , key=lambda lowerCamelCase__ : self.bpe_ranks.get(lowerCamelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCamelCase , _lowerCamelCase = bigram
_lowerCamelCase = []
_lowerCamelCase = 0
while i < len(lowerCamelCase__ ):
try:
_lowerCamelCase = word.index(lowerCamelCase__ , lowerCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCamelCase = j
if word[i] == first and i < len(lowerCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCamelCase = tuple(lowerCamelCase__ )
_lowerCamelCase = new_word
if len(lowerCamelCase__ ) == 1:
break
else:
_lowerCamelCase = get_pairs(lowerCamelCase__ )
_lowerCamelCase = ''' '''.join(lowerCamelCase__ )
_lowerCamelCase = word
return word
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = []
for token in re.findall(self.pat , lowerCamelCase__ ):
_lowerCamelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase__ ).split(''' ''' ) )
return bpe_tokens
def snake_case__ ( self , lowerCamelCase__ ):
return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )
def snake_case__ ( self , lowerCamelCase__ ):
return self.decoder.get(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = ''''''.join(lowerCamelCase__ )
_lowerCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) + '''\n''' )
_lowerCamelCase = 0
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
_lowerCamelCase = token_index
writer.write(''' '''.join(lowerCamelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=False , **lowerCamelCase__ ):
_lowerCamelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase__ ) > 0 and not text[0].isspace()):
_lowerCamelCase = ''' ''' + text
return (text, kwargs)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
return token_ids_a + [self.eos_token_id]
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(lowerCamelCase__ )
_lowerCamelCase = ''' '''.join(lowerCamelCase__ )
_lowerCamelCase = self.encode(lowerCamelCase__ )
if len(lowerCamelCase__ ) > self.model_max_length:
_lowerCamelCase = input_ids[-self.model_max_length :]
logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 661 | 1 |
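Since `bytes_to_unicode` above is self-contained, a quick sanity check of the mapping it builds: printable bytes map to themselves, while control bytes are shifted into visible code points (this is where GPT-2-style tokenizers get the familiar 'Ġ' for a space):

mapping = bytes_to_unicode()
assert len(mapping) == 256          # every byte value is covered
assert mapping[ord("a")] == "a"     # printable ASCII maps to itself
assert mapping[32] == "Ġ"           # space (byte 32) becomes chr(256 + 32)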
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically num //= 8, dropping any remainder
    # int() strips the trailing '.0' that the float math above leaves on `octal`.
    return f"0o{int(octal)}"
def main() -> None:
    """Print example conversions."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")
if __name__ == "__main__":
main()
| 661 |
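For comparison, the same conversion with divmod and string building avoids the base-10 float arithmetic entirely (a sketch, not the snippet's approach):

def decimal_to_octal_str(num: int) -> str:
    digits = []
    while num > 0:
        num, remainder = divmod(num, 8)
        digits.append(str(remainder))
    return "0o" + "".join(reversed(digits or ["0"]))

assert decimal_to_octal_str(65) == "0o101"
assert decimal_to_octal_str(0) == "0o0"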
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Dict = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Union[str, Any] = XLMRobertaTokenizer
lowercase__ : Optional[int] = XLMRobertaTokenizerFast
lowercase__ : List[str] = True
lowercase__ : Union[str, Any] = True
def snake_case__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase = XLMRobertaTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self ):
_lowerCamelCase = '''<pad>'''
_lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCamelCase__ ) , 1_0_0_2 )
def snake_case__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 )
def snake_case__ ( self ):
_lowerCamelCase = XLMRobertaTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
_lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_lowerCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def snake_case__ ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = tokenizer_r.save_pretrained(lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_lowerCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
_lowerCamelCase = tokenizer_r.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=True
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
_lowerCamelCase = tokenizer_r.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=False
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCamelCase = tokenizer_r.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
@cached_property
def snake_case__ ( self ):
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def snake_case__ ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase__ , f.name )
_lowerCamelCase = XLMRobertaTokenizer(f.name , keep_accents=lowerCamelCase__ )
_lowerCamelCase = pickle.dumps(lowerCamelCase__ )
pickle.loads(lowerCamelCase__ )
def snake_case__ ( self ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_rust_tokenizer()
_lowerCamelCase = '''I was born in 92000, and this is falsé.'''
_lowerCamelCase = tokenizer.tokenize(lowerCamelCase__ )
_lowerCamelCase = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
_lowerCamelCase = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = self.get_rust_tokenizer()
_lowerCamelCase = tokenizer.encode(lowerCamelCase__ )
_lowerCamelCase = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
_lowerCamelCase = '''Hello World!'''
_lowerCamelCase = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def snake_case__ ( self ):
_lowerCamelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
_lowerCamelCase = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = {'''input_ids''': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 661 | 1 |
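The `fairseq_offset` arithmetic exercised above reflects how XLM-R aligns SentencePiece ids with fairseq's dictionary, which reserves the first vocabulary slots for special tokens; a minimal restatement of the mapping (the offset of 1 and the reserved slots are the convention in the transformers implementation):

# fairseq front-loads {0: <s>, 1: <pad>, 2: </s>, 3: <unk>}, so plain
# SentencePiece ids are shifted up by one when converted to model ids.
def sp_id_to_model_id(sp_id: int, fairseq_offset: int = 1) -> int:
    return sp_id + fairseq_offset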
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
__SCREAMING_SNAKE_CASE : int = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 661 |
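The `sys.modules` registration above (and in the similar `__init__.py` rows below) swaps the package for a lazy proxy so the heavy torch imports only happen when an attribute is actually touched. A toy standalone sketch of the same trick, not the transformers implementation:

import importlib
import types

class ToyLazyModule(types.ModuleType):
    """Resolve attributes to submodule members on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)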
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last `n` digits of the non-Mersenne prime 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(1_0) = }""")
| 661 | 1 |
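The snippet leans on three-argument pow for modular exponentiation: 2**7830457 has roughly 2.4 million decimal digits, so reducing mod 10**n at every squaring step is what keeps the computation instant. A tiny check of the identity it relies on:

assert pow(2, 90, 10**4) == (2**90) % 10**4  # same last four digits, far less work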
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Trial-division primality test for non-negative integers."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value: int, factor: int = 1, **kwargs) -> int:
    """Return the next prime reached from `factor * value`; if the start is already prime,
    continue to the one after it (pass desc=True to search downward instead)."""
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
| 661 |
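A few quick checks of the helpers above, including the downward search that the desc keyword switches on:

assert is_prime(97) and not is_prime(96)
assert next_prime(14) == 17             # first prime reached walking up from 14
assert next_prime(13) == 17             # 13 is prime already, so step past it
assert next_prime(20, desc=True) == 19  # desc=True walks downward instead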
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 661 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : List[str] = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 661 |
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity: the Jaro score boosted by up to 4 characters of common prefix."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
| 661 | 1 |
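Working through the pair printed above: 'hello' and 'world' end up with a single usable matched character ('l'), so jaro = (1/5 + 1/5 + 1/1) / 3 ≈ 0.4667, and with no common prefix the Winkler boost leaves the score unchanged:

assert abs(jaro_winkler("hello", "world") - 0.4666666666666666) < 1e-12
assert jaro_winkler("hello", "hello") == 1.0  # full match, maximal prefix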
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Speed of sound c = sqrt(K / rho) for a fluid with bulk modulus K and density rho."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 |
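A quick numeric check using textbook values for water — bulk modulus ≈ 2.15 GPa and density ≈ 998 kg/m³, both assumed here for illustration:

speed = speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)
print(f"{speed:.0f} m/s")  # ~1468 m/s, close to the measured speed of sound in water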
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCamelCase_( A__, A__, A__ ):
'''simple docstring'''
lowercase__ : List[Any] = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = 5_0_2_5_7 , lowerCamelCase__ = 1_0_2_4 , lowerCamelCase__ = 7_6_8 , lowerCamelCase__ = 1_2 , lowerCamelCase__ = 1_2 , lowerCamelCase__ = None , lowerCamelCase__ = "gelu_new" , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 1e-5 , lowerCamelCase__ = 0.0_2 , lowerCamelCase__ = True , lowerCamelCase__ = True , lowerCamelCase__ = False , lowerCamelCase__ = False , ):
super().__init__()
_lowerCamelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
_lowerCamelCase = prefix_inner_dim
_lowerCamelCase = prefix_hidden_dim
_lowerCamelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_lowerCamelCase = (
nn.Linear(self.prefix_hidden_dim , lowerCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_lowerCamelCase = GPTaConfig(
vocab_size=lowerCamelCase__ , n_positions=lowerCamelCase__ , n_embd=lowerCamelCase__ , n_layer=lowerCamelCase__ , n_head=lowerCamelCase__ , n_inner=lowerCamelCase__ , activation_function=lowerCamelCase__ , resid_pdrop=lowerCamelCase__ , embd_pdrop=lowerCamelCase__ , attn_pdrop=lowerCamelCase__ , layer_norm_epsilon=lowerCamelCase__ , initializer_range=lowerCamelCase__ , scale_attn_weights=lowerCamelCase__ , use_cache=lowerCamelCase__ , scale_attn_by_inverse_layer_idx=lowerCamelCase__ , reorder_and_upcast_attn=lowerCamelCase__ , )
_lowerCamelCase = GPTaLMHeadModel(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , ):
_lowerCamelCase = self.transformer.transformer.wte(lowerCamelCase__ )
_lowerCamelCase = self.encode_prefix(lowerCamelCase__ )
_lowerCamelCase = self.decode_prefix(lowerCamelCase__ )
_lowerCamelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_lowerCamelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_lowerCamelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_lowerCamelCase = self.transformer(inputs_embeds=lowerCamelCase__ , labels=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
return torch.zeros(lowerCamelCase__ , self.prefix_length , dtype=torch.intaa , device=lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
return self.encode_prefix(lowerCamelCase__ )
@torch.no_grad()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = torch.split(lowerCamelCase__ , 1 , dim=0 )
_lowerCamelCase = []
_lowerCamelCase = []
for feature in features:
_lowerCamelCase = self.decode_prefix(feature.to(lowerCamelCase__ ) ) # back to the clip feature
# Only support beam search for now
_lowerCamelCase , _lowerCamelCase = self.generate_beam(
input_embeds=lowerCamelCase__ , device=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_lowerCamelCase = torch.stack(lowerCamelCase__ )
_lowerCamelCase = torch.stack(lowerCamelCase__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def snake_case__ ( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__ = 5 , lowerCamelCase__ = 6_7 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = None , ):
_lowerCamelCase = eos_token_id
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = torch.ones(lowerCamelCase__ , device=lowerCamelCase__ , dtype=torch.int )
_lowerCamelCase = torch.zeros(lowerCamelCase__ , device=lowerCamelCase__ , dtype=torch.bool )
if input_embeds is not None:
_lowerCamelCase = input_embeds
else:
_lowerCamelCase = self.transformer.transformer.wte(lowerCamelCase__ )
for i in range(lowerCamelCase__ ):
_lowerCamelCase = self.transformer(inputs_embeds=lowerCamelCase__ )
_lowerCamelCase = outputs.logits
_lowerCamelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_lowerCamelCase = logits.softmax(-1 ).log()
if scores is None:
_lowerCamelCase , _lowerCamelCase = logits.topk(lowerCamelCase__ , -1 )
_lowerCamelCase = generated.expand(lowerCamelCase__ , *generated.shape[1:] )
_lowerCamelCase , _lowerCamelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_lowerCamelCase = next_tokens
else:
_lowerCamelCase = tokens.expand(lowerCamelCase__ , *tokens.shape[1:] )
_lowerCamelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_lowerCamelCase = -float(np.inf )
_lowerCamelCase = 0
_lowerCamelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_lowerCamelCase = scores_sum / seq_lengths[:, None]
_lowerCamelCase , _lowerCamelCase = scores_sum_average.view(-1 ).topk(lowerCamelCase__ , -1 )
_lowerCamelCase = next_tokens // scores_sum.shape[1]
_lowerCamelCase = seq_lengths[next_tokens_source]
_lowerCamelCase = next_tokens % scores_sum.shape[1]
_lowerCamelCase = next_tokens.unsqueeze(1 )
_lowerCamelCase = tokens[next_tokens_source]
_lowerCamelCase = torch.cat((tokens, next_tokens) , dim=1 )
_lowerCamelCase = generated[next_tokens_source]
_lowerCamelCase = scores_sum_average * seq_lengths
_lowerCamelCase = is_stopped[next_tokens_source]
_lowerCamelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_lowerCamelCase = torch.cat((generated, next_token_embed) , dim=1 )
_lowerCamelCase = is_stopped + next_tokens.eq(lowerCamelCase__ ).squeeze()
if is_stopped.all():
break
_lowerCamelCase = scores / seq_lengths
_lowerCamelCase = scores.argsort(descending=lowerCamelCase__ )
# tokens tensors are already padded to max_seq_length
_lowerCamelCase = [tokens[i] for i in order]
_lowerCamelCase = torch.stack(lowerCamelCase__ , dim=0 )
_lowerCamelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 661 | 1 |
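The decoder above conditions GPT-2 on an image by projecting a CLIP feature into the embedding space and concatenating it in front of the caption tokens; a hedged restatement of that core step, where gpt2 stands for the inner GPTaLMHeadModel and the tensor shapes are assumptions:

import torch

def condition_on_prefix(gpt2, prefix_embeds, tokens):
    # prefix_embeds: (batch, prefix_length, d) projected CLIP features
    # tokens:        (batch, seq_len) caption token ids
    token_embeds = gpt2.transformer.wte(tokens)                      # (batch, seq_len, d)
    inputs_embeds = torch.cat((prefix_embeds, token_embeds), dim=1)  # prefix first
    return gpt2(inputs_embeds=inputs_embeds).logits                  # logits over the joint sequence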
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=2 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_6 , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=6 , lowerCamelCase__=6 , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=None , lowerCamelCase__=1_0_0_0 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = text_seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_input_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = coordinate_size
_lowerCamelCase = shape_size
_lowerCamelCase = num_labels
_lowerCamelCase = num_choices
_lowerCamelCase = scope
_lowerCamelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowerCamelCase = text_seq_length
_lowerCamelCase = (image_size // patch_size) ** 2 + 1
_lowerCamelCase = self.text_seq_length + self.image_seq_length
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCamelCase = bbox[i, j, 3]
_lowerCamelCase = bbox[i, j, 1]
_lowerCamelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCamelCase = bbox[i, j, 2]
_lowerCamelCase = bbox[i, j, 0]
_lowerCamelCase = t
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_input_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_lowerCamelCase = None
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_lowerCamelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = LayoutLMvaModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# text + image
_lowerCamelCase = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ )
_lowerCamelCase = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowerCamelCase = model(pixel_values=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = LayoutLMvaForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = LayoutLMvaForTokenClassification(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = LayoutLMvaForQuestionAnswering(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
(
_lowerCamelCase,
_lowerCamelCase,
_lowerCamelCase,
_lowerCamelCase,
_lowerCamelCase,
_lowerCamelCase,
_lowerCamelCase,
_lowerCamelCase,
) = config_and_inputs
_lowerCamelCase = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Any = False
lowercase__ : str = False
lowercase__ : str = False
lowercase__ : int = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase__ : Any = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embeddings along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embeddings only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def snake_case__ ( self ):
_lowerCamelCase = LayoutLMvaModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
_lowerCamelCase = copy.deepcopy(lowerCamelCase__ )
if model_class in get_values(lowerCamelCase__ ):
_lowerCamelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCamelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
_lowerCamelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ )
elif model_class in get_values(lowerCamelCase__ ):
_lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ )
_lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ )
elif model_class in [
*get_values(lowerCamelCase__ ),
]:
_lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ )
elif model_class in [
*get_values(lowerCamelCase__ ),
]:
_lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCamelCase__ , )
return inputs_dict
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase = type
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ )
@slow
def snake_case__ ( self ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = LayoutLMvaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> Union[str, Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
_lowerCamelCase = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(lowerCamelCase__ )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).pixel_values.to(lowerCamelCase__ )
_lowerCamelCase = torch.tensor([[1, 2]] )
_lowerCamelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_lowerCamelCase = model(
input_ids=input_ids.to(lowerCamelCase__ ) , bbox=bbox.to(lowerCamelCase__ ) , pixel_values=pixel_values.to(lowerCamelCase__ ) , )
# verify the logits
_lowerCamelCase = torch.Size((1, 1_9_9, 7_6_8) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ )
_lowerCamelCase = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
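# --- Hedged sketch (added for illustration; not part of the original test) ---
# The (1, 199, 768) expectation above comes from concatenating 2 text tokens
# with 197 visual tokens (a 14x14 patch grid for a 224x224 image at patch
# size 16, plus one extra CLS-like visual token) along dim 1 -- the same
# concatenation behind the `p_mask` caveat noted earlier in this file.
import torch

_text_embeddings = torch.zeros(1, 2, 768)
_visual_embeddings = torch.zeros(1, 14 * 14 + 1, 768)
_embedding_output = torch.cat([_text_embeddings, _visual_embeddings], dim=1)
assert _embedding_output.shape == (1, 199, 768)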
| 661 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__SCREAMING_SNAKE_CASE : Optional[int] = '''
@article{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
author = {Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__SCREAMING_SNAKE_CASE : List[str] = '''\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = '''
Calculates the SARI score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the SacreBLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def lowerCAmelCase_( lowercase_ : Union[str, Any] ) -> str:
def remove_articles(lowercase_ : int ):
_lowerCamelCase = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(lowercase_ , ''' ''' , lowercase_ )
def white_space_fix(lowercase_ : List[Any] ):
return " ".join(text.split() )
def remove_punc(lowercase_ : Dict ):
_lowerCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowercase_ : Union[str, Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowercase_ ) ) ) )
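# --- Hedged sketch (illustrative; not part of the original metric) ---
# A self-contained restatement of the normalization chain above with explicit
# names, so the intended order (lowercase -> strip punctuation -> drop
# articles -> collapse whitespace) is easy to verify:
def _normalize_answer_demo(text):
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

assert _normalize_answer_demo("The Quick, Brown Fox!") == "quick brown fox"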
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[Any] ) -> Union[str, Any]:
return int(normalize_answer(lowercase_ ) == normalize_answer(lowercase_ ) )
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Tuple ) -> Tuple:
_lowerCamelCase = [any(compute_exact(lowercase_ , lowercase_ ) for ref in refs ) for pred, refs in zip(lowercase_ , lowercase_ )]
return (sum(lowercase_ ) / len(lowercase_ )) * 1_00
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : str ) -> Optional[int]:
_lowerCamelCase = [rgram for rgrams in rgramslist for rgram in rgrams]
_lowerCamelCase = Counter(lowercase_ )
_lowerCamelCase = Counter(lowercase_ )
_lowerCamelCase = Counter()
for sgram, scount in sgramcounter.items():
_lowerCamelCase = scount * numref
_lowerCamelCase = Counter(lowercase_ )
_lowerCamelCase = Counter()
for cgram, ccount in cgramcounter.items():
_lowerCamelCase = ccount * numref
# KEEP
_lowerCamelCase = sgramcounter_rep & cgramcounter_rep
_lowerCamelCase = keepgramcounter_rep & rgramcounter
_lowerCamelCase = sgramcounter_rep & rgramcounter
_lowerCamelCase = 0
_lowerCamelCase = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCamelCase = 1
_lowerCamelCase = 1
if len(lowercase_ ) > 0:
_lowerCamelCase = keeptmpscorea / len(lowercase_ )
if len(lowercase_ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_lowerCamelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_lowerCamelCase = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_lowerCamelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_lowerCamelCase = sgramcounter_rep - cgramcounter_rep
_lowerCamelCase = delgramcounter_rep - rgramcounter
_lowerCamelCase = sgramcounter_rep - rgramcounter
_lowerCamelCase = 0
_lowerCamelCase = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCamelCase = 1
if len(lowercase_ ) > 0:
_lowerCamelCase = deltmpscorea / len(lowercase_ )
# ADDITION
_lowerCamelCase = set(lowercase_ ) - set(lowercase_ )
_lowerCamelCase = set(lowercase_ ) & set(lowercase_ )
_lowerCamelCase = set(lowercase_ ) - set(lowercase_ )
_lowerCamelCase = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCamelCase = 1
_lowerCamelCase = 1
if len(lowercase_ ) > 0:
_lowerCamelCase = addtmpscore / len(lowercase_ )
if len(lowercase_ ) > 0:
_lowerCamelCase = addtmpscore / len(lowercase_ )
_lowerCamelCase = 0
if addscore_precision > 0 or addscore_recall > 0:
_lowerCamelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
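# --- Hedged note (illustrative; not part of the original metric) ---
# The "0/0 = 1" convention used repeatedly above: when the denominator of a
# keep/delete/add precision or recall is empty, the score defaults to 1
# rather than 0, so predictions that match a reference exactly are not
# penalised. In miniature:
def _safe_ratio_demo(numerator, denominator):
    return numerator / denominator if denominator > 0 else 1.0

assert _safe_ratio_demo(0, 0) == 1.0  # nothing to keep/delete/add: perfect by convention
assert _safe_ratio_demo(1, 2) == 0.5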
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : str ) -> List[str]:
_lowerCamelCase = len(lowercase_ )
_lowerCamelCase = ssent.split(''' ''' )
_lowerCamelCase = csent.split(''' ''' )
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
for rsent in rsents:
_lowerCamelCase = rsent.split(''' ''' )
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = []
ragramslist.append(lowercase_ )
for i in range(0 , len(lowercase_ ) - 1 ):
if i < len(lowercase_ ) - 1:
_lowerCamelCase = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(lowercase_ )
if i < len(lowercase_ ) - 2:
_lowerCamelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(lowercase_ )
if i < len(lowercase_ ) - 3:
_lowerCamelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(lowercase_ )
ragramslist.append(lowercase_ )
ragramslist.append(lowercase_ )
ragramslist.append(lowercase_ )
for i in range(0 , len(lowercase_ ) - 1 ):
if i < len(lowercase_ ) - 1:
_lowerCamelCase = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(lowercase_ )
if i < len(lowercase_ ) - 2:
_lowerCamelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(lowercase_ )
if i < len(lowercase_ ) - 3:
_lowerCamelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(lowercase_ )
for i in range(0 , len(lowercase_ ) - 1 ):
if i < len(lowercase_ ) - 1:
_lowerCamelCase = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(lowercase_ )
if i < len(lowercase_ ) - 2:
_lowerCamelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(lowercase_ )
if i < len(lowercase_ ) - 3:
_lowerCamelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(lowercase_ )
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = SARIngram(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
_lowerCamelCase = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_lowerCamelCase = sum([delascore, delascore, delascore, delascore] ) / 4
_lowerCamelCase = sum([addascore, addascore, addascore, addascore] ) / 4
_lowerCamelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
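# --- Hedged sketch (illustrative; not part of the original metric) ---
# SARIsent above scores unigrams through 4-grams separately and averages the
# four results. The n-gram construction it performs inline, restated:
def _ngrams_demo(tokens, n):
    return [" ".join(tokens[i : i + n]) for i in range(len(tokens) - n + 1)]

assert _ngrams_demo(["a", "b", "c"], 2) == ["a b", "b c"]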
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : bool = True , lowercase_ : str = "13a" , lowercase_ : bool = True ) -> int:
# Normalization is required for the ASSET dataset (one of the primary
# datasets in sentence simplification) to allow using space
# to split the sentence. Even though the Wiki-Auto and TURK datasets
# do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
_lowerCamelCase = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_lowerCamelCase = sacrebleu.metrics.bleu._get_tokenizer(lowercase_ )()(lowercase_ )
else:
_lowerCamelCase = sacrebleu.TOKENIZERS[tokenizer]()(lowercase_ )
elif tokenizer == "moses":
_lowerCamelCase = sacremoses.MosesTokenizer().tokenize(lowercase_ , return_str=lowercase_ , escape=lowercase_ )
elif tokenizer == "penn":
_lowerCamelCase = sacremoses.MosesTokenizer().penn_tokenize(lowercase_ , return_str=lowercase_ )
else:
_lowerCamelCase = sentence
if not return_str:
_lowerCamelCase = normalized_sent.split()
return normalized_sent
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Any , lowercase_ : List[Any] ) -> Optional[int]:
if not (len(lowercase_ ) == len(lowercase_ ) == len(lowercase_ )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_lowerCamelCase = 0
for src, pred, refs in zip(lowercase_ , lowercase_ , lowercase_ ):
sari_score += SARIsent(normalize(lowercase_ ) , normalize(lowercase_ ) , [normalize(lowercase_ ) for sent in refs] )
_lowerCamelCase = sari_score / len(lowercase_ )
return 1_00 * sari_score
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Any , lowercase_ : List[Any]="exp" , lowercase_ : List[Any]=None , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=False , lowercase_ : List[Any]=False , ) -> Dict:
_lowerCamelCase = len(references[0] )
if any(len(lowercase_ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_lowerCamelCase = [[refs[i] for refs in references] for i in range(lowercase_ )]
_lowerCamelCase = sacrebleu.corpus_bleu(
lowercase_ , lowercase_ , smooth_method=lowercase_ , smooth_value=lowercase_ , force=lowercase_ , lowercase=lowercase_ , use_effective_order=lowercase_ , )
return output.score
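# --- Hedged sketch (illustrative; not part of the original metric) ---
# sacrebleu.corpus_bleu expects reference *streams* (transformed_references[i]
# holds the i-th reference of every prediction), while this metric receives
# per-prediction reference lists, hence the transposition above:
_refs = [["hello there", "hi there"], ["general kenobi", "general grievous"]]
_transposed = [[refs[i] for refs in _refs] for i in range(len(_refs[0]))]
assert _transposed == [["hello there", "general kenobi"], ["hi there", "general grievous"]]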
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCamelCase_( datasets.Metric ):
'''simple docstring'''
def snake_case__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = {}
result.update({'''sari''': compute_sari(sources=lowerCamelCase__ , predictions=lowerCamelCase__ , references=lowerCamelCase__ )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowerCamelCase__ , references=lowerCamelCase__ )} )
result.update({'''exact''': compute_em(predictions=lowerCamelCase__ , references=lowerCamelCase__ )} )
return result
| 661 | 1 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
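# --- Hedged sketch (illustrative; the helper below is hypothetical) ---
# A table like this is typically consumed by resolving bare package names into
# their pinned requirement strings, e.g. when building setup() extras:
def _deps_list_demo(table, *pkgs):
    return [table[p] for p in pkgs]

assert _deps_list_demo({"torch": "torch>=1.4", "numpy": "numpy"}, "torch", "numpy") == ["torch>=1.4", "numpy"]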
| 661 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
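# --- Hedged sketch (illustrative; not transformers' actual _LazyModule) ---
# The lazy-import pattern above in miniature: attribute access triggers the
# real import, so importing the package stays cheap until a symbol is used.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for module_name, symbols in self._import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(module_name), attr)
        raise AttributeError(attr)

_lazy_demo = _TinyLazyModule("demo", {"json": ["dumps"]})
assert _lazy_demo.dumps({"a": 1}) == '{"a": 1}'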
| 661 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : int = 'xglm'
lowercase__ : Any = ['past_key_values']
lowercase__ : Optional[int] = {
'num_attention_heads': 'attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'num_layers',
}
def __init__( self , lowerCamelCase__=2_5_6_0_0_8 , lowerCamelCase__=2_0_4_8 , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=4_0_9_6 , lowerCamelCase__=2_4 , lowerCamelCase__=1_6 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=2 , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=2 , **lowerCamelCase__ , ):
_lowerCamelCase = vocab_size
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = d_model
_lowerCamelCase = ffn_dim
_lowerCamelCase = num_layers
_lowerCamelCase = attention_heads
_lowerCamelCase = activation_function
_lowerCamelCase = dropout
_lowerCamelCase = attention_dropout
_lowerCamelCase = activation_dropout
_lowerCamelCase = layerdrop
_lowerCamelCase = init_std
_lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCamelCase = use_cache
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
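# --- Hedged sketch (illustrative; a toy restatement, not PretrainedConfig) ---
# The attribute_map above aliases the generic config names onto the
# XGLM-specific ones (hidden_size -> d_model, etc.). The mechanism in miniature:
class _AttrMapDemo:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self):
        self.d_model = 1_0_2_4

    def __getattr__(self, name):
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

assert _AttrMapDemo().hidden_size == 1_0_2_4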
| 661 |
"""simple docstring"""
# fmt: off
__SCREAMING_SNAKE_CASE : Any = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__SCREAMING_SNAKE_CASE : List[str] = {value: key for key, value in MORSE_CODE_DICT.items()}
def lowerCAmelCase_( lowercase_ : str ) -> str:
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def lowerCAmelCase_( lowercase_ : str ) -> str:
return "".join(REVERSE_DICT[char] for char in message.split() )
def lowerCAmelCase_( ) -> None:
_lowerCamelCase = '''Morse code here!'''
print(lowercase_ )
_lowerCamelCase = encrypt(lowercase_ )
print(lowercase_ )
_lowerCamelCase = decrypt(lowercase_ )
print(lowercase_ )
if __name__ == "__main__":
main()
| 661 | 1 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def lowerCAmelCase_( lowercase_ : dict ) -> tuple:
return (data["data"], data["target"])
def lowerCAmelCase_( lowercase_ : np.ndarray , lowercase_ : np.ndarray ) -> XGBClassifier:
_lowerCamelCase = XGBClassifier()
classifier.fit(lowercase_ , lowercase_ )
return classifier
def lowerCAmelCase_( ) -> None:
_lowerCamelCase = load_iris()
_lowerCamelCase , _lowerCamelCase = data_handling(lowercase_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = train_test_split(
lowercase_ , lowercase_ , test_size=0.2_5 )
_lowerCamelCase = iris['''target_names''']
# Create an XGBoost Classifier from the training data
_lowerCamelCase = xgboost(lowercase_ , lowercase_ )
# Display the confusion matrix of the classifier on the test set
ConfusionMatrixDisplay.from_estimator(
lowercase_ , lowercase_ , lowercase_ , display_labels=lowercase_ , cmap='''Blues''' , normalize='''true''' , )
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 661 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__SCREAMING_SNAKE_CASE : List[Any] = True
except (ImportError, AttributeError):
__SCREAMING_SNAKE_CASE : List[Any] = object
def lowerCAmelCase_( *lowercase_ : Dict , **lowerCamelCase__ : str ) -> str:
pass
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Any = logging.get_logger('''transformers-cli/serving''')
def lowerCAmelCase_( lowercase_ : Namespace ) -> List[Any]:
_lowerCamelCase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(lowercase_ , args.host , args.port , args.workers )
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : dict
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : List[str]
lowercase__ : Optional[List[int]]
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : str
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Any
class lowerCamelCase_( A__ ):
'''simple docstring'''
@staticmethod
def snake_case__ ( lowerCamelCase__ ):
_lowerCamelCase = parser.add_parser(
'''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
serve_parser.add_argument(
'''--task''' , type=lowerCamelCase__ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
serve_parser.add_argument('''--host''' , type=lowerCamelCase__ , default='''localhost''' , help='''Interface the server will listen on.''' )
serve_parser.add_argument('''--port''' , type=lowerCamelCase__ , default=8_8_8_8 , help='''Port the serving will listen to.''' )
serve_parser.add_argument('''--workers''' , type=lowerCamelCase__ , default=1 , help='''Number of http workers''' )
serve_parser.add_argument('''--model''' , type=lowerCamelCase__ , help='''Model\'s name or path to stored model.''' )
serve_parser.add_argument('''--config''' , type=lowerCamelCase__ , help='''Model\'s config name or path to stored model.''' )
serve_parser.add_argument('''--tokenizer''' , type=lowerCamelCase__ , help='''Tokenizer name to use.''' )
serve_parser.add_argument(
'''--device''' , type=lowerCamelCase__ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
serve_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = pipeline
_lowerCamelCase = host
_lowerCamelCase = port
_lowerCamelCase = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install "transformers[serving]".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(F"""Serving model over {host}:{port}""" )
_lowerCamelCase = FastAPI(
routes=[
APIRoute(
'''/''' , self.model_info , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''GET'''] , ),
APIRoute(
'''/tokenize''' , self.tokenize , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''POST'''] , ),
APIRoute(
'''/detokenize''' , self.detokenize , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''POST'''] , ),
APIRoute(
'''/forward''' , self.forward , response_model=lowerCamelCase__ , response_class=lowerCamelCase__ , methods=['''POST'''] , ),
] , timeout=6_0_0 , )
def snake_case__ ( self ):
run(self._app , host=self.host , port=self.port , workers=self.workers )
def snake_case__ ( self ):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def snake_case__ ( self , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) ):
try:
_lowerCamelCase = self._pipeline.tokenizer.tokenize(lowerCamelCase__ )
if return_ids:
_lowerCamelCase = self._pipeline.tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
return ServeTokenizeResult(tokens=lowerCamelCase__ , tokens_ids=lowerCamelCase__ )
else:
return ServeTokenizeResult(tokens=lowerCamelCase__ )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={'''model''': '''''', '''error''': str(lowerCamelCase__ )} )
def snake_case__ ( self , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , lowerCamelCase__ = Body(lowerCamelCase__ , embed=lowerCamelCase__ ) , ):
try:
_lowerCamelCase = self._pipeline.tokenizer.decode(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return ServeDeTokenizeResult(model='''''' , text=lowerCamelCase__ )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={'''model''': '''''', '''error''': str(lowerCamelCase__ )} )
async def snake_case__ ( self , lowerCamelCase__=Body(lowerCamelCase__ , embed=lowerCamelCase__ ) ):
# Check we don't have an empty string
if len(lowerCamelCase__ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
_lowerCamelCase = self._pipeline(lowerCamelCase__ )
return ServeForwardResult(output=lowerCamelCase__ )
except Exception as e:
raise HTTPException(5_0_0 , {'''error''': str(lowerCamelCase__ )} )
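# --- Hedged usage sketch, grounded in the argparse options registered above
# (the model name below is a placeholder) ---
#
#   transformers-cli serve --task feature-extraction \
#       --model bert-base-uncased --host localhost --port 8888 --workers 1
#
# The server then exposes GET / (model info) plus POST /tokenize, /detokenize
# and /forward, matching the APIRoute definitions in __init__.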
| 661 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER', 'False' ) ) is not True, reason='Skipping test because it should only be run when releasing a minor transformers version', )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=lowerCamelCase__ , )
assert hasattr(self , '''env''' )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = F"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
_lowerCamelCase = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=lowerCamelCase__ , instance_count=lowerCamelCase__ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase__ , py_version='''py36''' , )
def snake_case__ ( self , lowerCamelCase__ ):
TrainingJobAnalytics(lowerCamelCase__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def snake_case__ ( self , lowerCamelCase__ ):
# create estimator
_lowerCamelCase = self.create_estimator(lowerCamelCase__ )
# run training
estimator.fit()
# result dataframe
_lowerCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
_lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , lowerCamelCase__ )
| 661 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Dict ) -> List[Any]:
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str ) -> List[Any]:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCamelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : int ) -> Optional[int]:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCamelCase = features.copy() if features else default_expected_features
_lowerCamelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : List[Any] ) -> Tuple:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
_lowerCamelCase = features.copy() if features else default_expected_features
_lowerCamelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Optional[int] ) -> List[str]:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
_lowerCamelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
_lowerCamelCase = features.copy()
_lowerCamelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ) -> int:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCamelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ , split=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Tuple ) -> Optional[int]:
if issubclass(lowercase_ , lowercase_ ):
_lowerCamelCase = jsonl_path
elif issubclass(lowercase_ , lowercase_ ):
_lowerCamelCase = [jsonl_path]
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCamelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
def lowerCAmelCase_( lowercase_ : int , lowercase_ : List[str] , lowercase_ : str=("train",) ) -> List[str]:
assert isinstance(lowercase_ , lowercase_ )
for split in splits:
_lowerCamelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : int ) -> Dict:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCamelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_json_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] ) -> List[Any]:
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCamelCase = features.copy() if features else default_expected_features
_lowerCamelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase = JsonDatasetReader({'''train''': jsonl_path} , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_json_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase_( lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : List[str] ) -> Optional[Any]:
if split:
_lowerCamelCase = {split: jsonl_path}
else:
_lowerCamelCase = '''train'''
_lowerCamelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowerCamelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_json_datasetdict(lowercase_ , lowercase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_( lowercase_ : Optional[int] ) -> Optional[Any]:
return json.load(lowercase_ )
def lowerCAmelCase_( lowercase_ : Tuple ) -> Tuple:
return [json.loads(lowercase_ ) for line in buffer]
class lowerCamelCase_:
'''simple docstring'''
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ ).write()
buffer.seek(0 )
_lowerCamelCase = load_json_function(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert isinstance(exported_content[0] , lowerCamelCase__ )
assert len(lowerCamelCase__ ) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , orient=lowerCamelCase__ ).write()
buffer.seek(0 )
_lowerCamelCase = load_json(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase__ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(lowerCamelCase__ ) == 1_0
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
_lowerCamelCase = load_json_function(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert isinstance(exported_content[0] , lowerCamelCase__ )
assert len(lowerCamelCase__ ) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , orient=lowerCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
_lowerCamelCase = load_json(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase__ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(lowerCamelCase__ ) == 1_0
def snake_case__ ( self , lowerCamelCase__ ):
with pytest.raises(lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = tmp_path_factory.mktemp('''data''' ) / F"""test.json.{extension}"""
_lowerCamelCase = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , compression=lowerCamelCase__ ).write()
with fsspec.open(lowerCamelCase__ , '''rb''' , compression='''infer''' ) as f:
_lowerCamelCase = f.read()
with fsspec.open(lowerCamelCase__ , '''rb''' , compression='''infer''' ) as f:
_lowerCamelCase = f.read()
assert exported_content == original_content
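# --- Hedged round-trip sketch (illustrative, mirroring the tests above;
# assumes JsonDatasetWriter's line-delimited output, as exercised here) ---
def _json_roundtrip_demo():
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    with io.BytesIO() as buffer:
        JsonDatasetWriter(ds, buffer, lines=True).write()
        buffer.seek(0)
        rows = [json.loads(line) for line in buffer.read().splitlines()]
    assert rows == [{"col_1": "a", "col_2": 1}, {"col_1": "b", "col_2": 2}]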
| 661 | 1 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowerCAmelCase_( lowercase_ : List[str] ) -> Union[str, Any]: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCAmelCase_( ) -> str:
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
_lowerCamelCase = [1, 2, 3]
with pytest.raises(lowercase_ ):
with parallel_backend('''unsupported backend''' ):
map_nested(lowercase_ , lowercase_ , num_proc=2 )
with pytest.raises(lowercase_ ):
with parallel_backend('''unsupported backend''' ):
map_nested(lowercase_ , lowercase_ , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1] )
def lowerCAmelCase_( lowercase_ : List[str] ) -> Dict:
_lowerCamelCase = [1, 2]
_lowerCamelCase = {'''a''': 1, '''b''': 2}
_lowerCamelCase = {'''a''': [1, 2], '''b''': [3, 4]}
_lowerCamelCase = {'''a''': {'''1''': 1}, '''b''': 2}
_lowerCamelCase = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
_lowerCamelCase = [2, 3]
_lowerCamelCase = {'''a''': 2, '''b''': 3}
_lowerCamelCase = {'''a''': [2, 3], '''b''': [4, 5]}
_lowerCamelCase = {'''a''': {'''1''': 2}, '''b''': 3}
_lowerCamelCase = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark''' ):
assert map_nested(lowercase_ , lowercase_ , num_proc=lowercase_ ) == expected_map_nested_sa
assert map_nested(lowercase_ , lowercase_ , num_proc=lowercase_ ) == expected_map_nested_sa
assert map_nested(lowercase_ , lowercase_ , num_proc=lowercase_ ) == expected_map_nested_sa
assert map_nested(lowercase_ , lowercase_ , num_proc=lowercase_ ) == expected_map_nested_sa
assert map_nested(lowercase_ , lowercase_ , num_proc=lowercase_ ) == expected_map_nested_sa
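# --- Hedged sketch (illustrative; not datasets' actual map_nested) ---
# map_nested applies a function to every leaf of a nested container while
# preserving the container structure, as the assertions above exercise:
def _map_nested_demo(fn, obj):
    if isinstance(obj, dict):
        return {k: _map_nested_demo(fn, v) for k, v in obj.items()}
    if isinstance(obj, list):
        return [_map_nested_demo(fn, v) for v in obj]
    return fn(obj)

assert _map_nested_demo(lambda i: i + 1, {"a": [1, 2], "b": {"1": 1}}) == {"a": [2, 3], "b": {"1": 2}}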
| 661 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=3 , lowerCamelCase__=3_2 , lowerCamelCase__=3 , lowerCamelCase__=1_0 , lowerCamelCase__=[8, 1_6, 3_2, 6_4] , lowerCamelCase__=[1, 1, 2, 1] , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="relu" , lowerCamelCase__=3 , lowerCamelCase__=None , lowerCamelCase__=["stage2", "stage3", "stage4"] , lowerCamelCase__=[2, 3, 4] , lowerCamelCase__=1 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = num_channels
_lowerCamelCase = embeddings_size
_lowerCamelCase = hidden_sizes
_lowerCamelCase = depths
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_act
_lowerCamelCase = num_labels
_lowerCamelCase = scope
_lowerCamelCase = len(lowerCamelCase__ )
_lowerCamelCase = out_features
_lowerCamelCase = out_indices
_lowerCamelCase = num_groups
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = BitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = BitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = BitBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowerCamelCase = None
_lowerCamelCase = BitBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case__ ( self ):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Dict = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowercase__ : Any = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Union[str, Any] = False
lowercase__ : List[Any] = False
lowercase__ : Any = False
lowercase__ : List[str] = False
lowercase__ : Any = False
def snake_case__ ( self ):
_lowerCamelCase = BitModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def snake_case__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(config=lowerCamelCase__ )
for name, module in model.named_modules():
            if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def snake_case__ ( self ):
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCamelCase = layer_type
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def snake_case__ ( self ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = BitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> List[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def snake_case__ ( self ):
_lowerCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
_lowerCamelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
@require_torch
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (BitBackbone,) if is_torch_available() else ()
lowercase__ : Tuple = BitConfig
lowercase__ : Any = False
def snake_case__ ( self ):
_lowerCamelCase = BitModelTester(self )
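# --- Illustrative appendix (not part of the test suite above) ---
# A minimal, self-contained sketch of the initialization check performed in the
# weight-init test above: freshly constructed BatchNorm2d / GroupNorm layers are
# expected to start with weight == 1 and bias == 0. A toy module stands in for
# BitModel here; assumes only that torch is installed.
import torch
from torch import nn

_toy = nn.Sequential(nn.Conv2d(3 , 8 , 3 ) , nn.BatchNorm2d(8 ) , nn.GroupNorm(2 , 8 ) )
for _name, _module in _toy.named_modules():
    if isinstance(_module , (nn.BatchNorm2d, nn.GroupNorm) ):
        assert torch.all(_module.weight == 1 ) , f"{_name} weight not initialized to 1"
        assert torch.all(_module.bias == 0 ) , f"{_name} bias not initialized to 0"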
| 661 | 1 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter( logging.LoggerAdapter ):
'''simple docstring'''
@staticmethod
def snake_case__ ( lowerCamelCase__ ):
_lowerCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ):
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
_lowerCamelCase = kwargs.pop('''main_process_only''' , lowerCamelCase__ )
_lowerCamelCase = kwargs.pop('''in_order''' , lowerCamelCase__ )
if self.isEnabledFor(lowerCamelCase__ ):
if self._should_log(lowerCamelCase__ ):
_lowerCamelCase , _lowerCamelCase = self.process(lowerCamelCase__ , lowerCamelCase__ )
self.logger.log(lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
elif in_order:
_lowerCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_lowerCamelCase , _lowerCamelCase = self.process(lowerCamelCase__ , lowerCamelCase__ )
self.logger.log(lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
state.wait_for_everyone()
def lowerCAmelCase_( lowercase_ : str , lowercase_ : str = None ) -> int:
if log_level is None:
_lowerCamelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , lowercase_ )
_lowerCamelCase = logging.getLogger(lowercase_ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(lowercase_ , {} )
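# Usage sketch for the helper above (assumes `accelerate` is installed): the
# shared state must be initialized first, e.g. by constructing an Accelerator.
# By default each call logs only on the main process; main_process_only=False
# logs on every process, and in_order=True serializes multi-process output.
# The released equivalent of the helper above is accelerate.logging.get_logger.
from accelerate import Accelerator
from accelerate.logging import get_logger

_accelerator = Accelerator()  # initializes the PartialState checked above
_logger = get_logger(__name__ , log_level='''INFO''' )
_logger.info('''logged on the main process only''' )
_logger.info('''logged on every process''' , main_process_only=False )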
| 661 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=2 , lowerCamelCase__=2_4 , lowerCamelCase__=1_6 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=None , lowerCamelCase__=2 , lowerCamelCase__=2 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = patch_size
_lowerCamelCase = max_length
_lowerCamelCase = num_mel_bins
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = scope
_lowerCamelCase = frequency_stride
_lowerCamelCase = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCamelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_lowerCamelCase = (self.max_length - self.patch_size) // self.time_stride + 1
_lowerCamelCase = frequency_out_dimension * time_out_dimension
_lowerCamelCase = num_patches + 2
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, input_values, labels
def snake_case__ ( self ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = ASTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self ):
        config, input_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Any = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowercase__ : List[str] = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
lowercase__ : int = False
lowercase__ : str = False
lowercase__ : Union[str, Any] = False
lowercase__ : List[str] = False
    def snake_case__ ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
def snake_case__ ( self ):
_lowerCamelCase = ASTModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''input_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@slow
def snake_case__ ( self ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = ASTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> str:
_lowerCamelCase = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
_lowerCamelCase , _lowerCamelCase = torchaudio.load(lowercase_ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def snake_case__ ( self ):
_lowerCamelCase = self.default_feature_extractor
_lowerCamelCase = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(lowerCamelCase__ )
_lowerCamelCase = self.default_feature_extractor
_lowerCamelCase , _lowerCamelCase = prepare_audio()
_lowerCamelCase = audio.squeeze().numpy()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
_lowerCamelCase = torch.Size((1, 5_2_7) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
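# Worked example of the sequence-length arithmetic used by ASTModelTester above:
# with patch_size=2, max_length=24, num_mel_bins=16 and both strides equal to 2,
# the patch grid is 8 x 12, plus 2 for the [CLS] and distillation tokens.
_patch_size, _max_length, _num_mel_bins = 2, 2_4, 1_6
_frequency_stride, _time_stride = 2, 2
_frequency_out = (_num_mel_bins - _patch_size) // _frequency_stride + 1  # 8
_time_out = (_max_length - _patch_size) // _time_stride + 1  # 12
assert _frequency_out * _time_out + 2 == 9_8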
| 661 | 1 |
"""simple docstring"""
from typing import Any
def lowerCAmelCase_( input_list : list ) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
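    # Quick self-checks for the mode helper above (illustrative, not exhaustive).
    assert lowerCAmelCase_([2, 2, 3] ) == [2]
    assert lowerCAmelCase_([1, 2, 2, 3, 3] ) == [2, 3]
    assert lowerCAmelCase_([] ) == []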
| 661 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    '''simple docstring'''
    def forward( self , input_ids , token_type_ids , attention_mask ):
        return None
class FuncNonContiguousArgs:
    '''simple docstring'''
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        return None
class OnnxExportTestCase( unittest.TestCase ):
'''simple docstring'''
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def snake_case__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase__ , '''tf''' , 1_2 , **lowerCamelCase__ )
@require_torch
@slow
def snake_case__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase__ , '''pt''' , 1_2 , **lowerCamelCase__ )
@require_torch
@slow
def snake_case__ ( self ):
from transformers import BertModel
_lowerCamelCase = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(lowerCamelCase__ ) )
vocab_file.flush()
_lowerCamelCase = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
_lowerCamelCase = BertModel(BertConfig(vocab_size=len(lowerCamelCase__ ) ) )
model.save_pretrained(lowerCamelCase__ )
self._test_export(lowerCamelCase__ , '''pt''' , 1_2 , lowerCamelCase__ )
@require_tf
@slow
def snake_case__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowerCamelCase = self._test_export(lowerCamelCase__ , '''tf''' , 1_2 , **lowerCamelCase__ )
_lowerCamelCase = quantize(Path(lowerCamelCase__ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def snake_case__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowerCamelCase = self._test_export(lowerCamelCase__ , '''pt''' , 1_2 , **lowerCamelCase__ )
_lowerCamelCase = quantize(lowerCamelCase__ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
_lowerCamelCase = Path(lowerCamelCase__ ).joinpath('''model.onnx''' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
return path
except Exception as e:
self.fail(lowerCamelCase__ )
@require_torch
@require_tokenizers
@slow
def snake_case__ ( self ):
from transformers import BertModel
_lowerCamelCase = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
_lowerCamelCase = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(lowerCamelCase__ , lowerCamelCase__ , '''pt''' )
@require_tf
@require_tokenizers
@slow
def snake_case__ ( self ):
from transformers import TFBertModel
_lowerCamelCase = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
_lowerCamelCase = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(lowerCamelCase__ , lowerCamelCase__ , '''tf''' )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = FeatureExtractionPipeline(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = infer_shapes(lowerCamelCase__ , lowerCamelCase__ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , lowerCamelCase__ )
self.assertSequenceEqual(variable_names[3:] , lowerCamelCase__ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def snake_case__ ( self ):
_lowerCamelCase = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
_lowerCamelCase = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
_lowerCamelCase , _lowerCamelCase = ensure_valid_input(FuncContiguousArgs() , lowerCamelCase__ , lowerCamelCase__ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase__ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase__ ) , set(lowerCamelCase__ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase__ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
_lowerCamelCase , _lowerCamelCase = ensure_valid_input(FuncNonContiguousArgs() , lowerCamelCase__ , lowerCamelCase__ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCamelCase__ ) , 1 )
self.assertEqual(len(lowerCamelCase__ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def snake_case__ ( self ):
_lowerCamelCase = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
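# Minimal pure-pathlib sketch of the helper exercised above (an illustrative
# equivalent, not the transformers implementation): the identifier is inserted
# between the stem and the extension.
def _identified_filename( path : Path , identifier : str ) -> Path:
    return path.parent.joinpath(path.stem + identifier ).with_suffix(path.suffix )
assert _identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' ).as_posix() == '''/home/something/my_fake_model-test.onnx'''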
| 661 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
    '''simple docstring'''
    latents : torch.FloatTensor
class VQModel( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCamelCase__ = 3 , lowerCamelCase__ = 3 , lowerCamelCase__ = ("DownEncoderBlock2D",) , lowerCamelCase__ = ("UpDecoderBlock2D",) , lowerCamelCase__ = (6_4,) , lowerCamelCase__ = 1 , lowerCamelCase__ = "silu" , lowerCamelCase__ = 3 , lowerCamelCase__ = 3_2 , lowerCamelCase__ = 2_5_6 , lowerCamelCase__ = 3_2 , lowerCamelCase__ = None , lowerCamelCase__ = 0.1_8_2_1_5 , lowerCamelCase__ = "group" , ):
super().__init__()
# pass init params to Encoder
_lowerCamelCase = Encoder(
in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , down_block_types=lowerCamelCase__ , block_out_channels=lowerCamelCase__ , layers_per_block=lowerCamelCase__ , act_fn=lowerCamelCase__ , norm_num_groups=lowerCamelCase__ , double_z=lowerCamelCase__ , )
_lowerCamelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels
        _lowerCamelCase = nn.Conv2d(lowerCamelCase__ , lowerCamelCase__ , 1 )
_lowerCamelCase = VectorQuantizer(lowerCamelCase__ , lowerCamelCase__ , beta=0.2_5 , remap=lowerCamelCase__ , sane_index_shape=lowerCamelCase__ )
        _lowerCamelCase = nn.Conv2d(lowerCamelCase__ , lowerCamelCase__ , 1 )
# pass init params to Decoder
_lowerCamelCase = Decoder(
in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , up_block_types=lowerCamelCase__ , block_out_channels=lowerCamelCase__ , layers_per_block=lowerCamelCase__ , act_fn=lowerCamelCase__ , norm_num_groups=lowerCamelCase__ , norm_type=lowerCamelCase__ , )
@apply_forward_hook
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = True ):
_lowerCamelCase = self.encoder(lowerCamelCase__ )
_lowerCamelCase = self.quant_conv(lowerCamelCase__ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCamelCase__ )
@apply_forward_hook
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = False , lowerCamelCase__ = True ):
# also go through quantization layer
if not force_not_quantize:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.quantize(lowerCamelCase__ )
else:
_lowerCamelCase = h
_lowerCamelCase = self.post_quant_conv(lowerCamelCase__ )
_lowerCamelCase = self.decoder(lowerCamelCase__ , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = True ):
_lowerCamelCase = sample
_lowerCamelCase = self.encode(lowerCamelCase__ ).latents
_lowerCamelCase = self.decode(lowerCamelCase__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase__ )
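# Minimal sketch of the vector-quantization step delegated to VectorQuantizer
# above (an illustrative toy, not the diffusers implementation): each latent
# vector is snapped to its nearest codebook entry by L2 distance.
_codebook = torch.randn(2_5_6 , 4 )      # (num_embeddings, vq_embed_dim)
_latents = torch.randn(2 , 4 , 8 , 8 )   # (batch, channels, height, width)
_flat = _latents.permute(0 , 2 , 3 , 1 ).reshape(-1 , 4 )
_indices = torch.cdist(_flat , _codebook ).argmin(dim=1 )
_quantized = _codebook[_indices].reshape(2 , 8 , 8 , 4 ).permute(0 , 3 , 1 , 2 )
assert _quantized.shape == _latents.shape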
| 661 |
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(1_0)}
def digits_fifth_powers_sum( number : int ) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution() -> int:
    return sum(
        number
        for number in range(10_00 , 1_00_00_00 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
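    # Worked check: 4150 qualifies, since 4**5 + 1**5 + 5**5 + 0**5
    # = 1024 + 1 + 3125 + 0 = 4150.
    assert digits_fifth_powers_sum(4150 ) == 4150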
| 661 | 1 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__ ( self ):
_lowerCamelCase = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
_lowerCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' )
_lowerCamelCase = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
_lowerCamelCase = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
_lowerCamelCase = shift_tokens_right(lowerCamelCase__ , model.config.pad_token_id , model.config.decoder_start_token_id )
_lowerCamelCase = model(lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ ).logits
_lowerCamelCase = optax.softmax_cross_entropy(lowerCamelCase__ , onehot(lowerCamelCase__ , logits.shape[-1] ) ).mean()
_lowerCamelCase = -(labels.shape[-1] * loss.item())
_lowerCamelCase = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
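# Sketch of the loss computation above on toy numbers (assumes jax, optax and
# flax are installed): optax.softmax_cross_entropy expects one-hot targets, and
# the score is the mean loss rescaled by the sequence length, negated.
import jax.numpy as jnp

_logits = jnp.array([[[2.0, 0.5, -1.0], [0.1, 0.2, 0.3]]] )  # (batch=1, seq=2, vocab=3)
_labels = jnp.array([[0, 2]] )
_loss = optax.softmax_cross_entropy(_logits , onehot(_labels , 3 ) ).mean()
print(-(_labels.shape[-1] * _loss.item()) )  # same rescaling as the test above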
| 661 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    '''simple docstring'''
    def __init__( self , vertices , edges ):
        self.vertices : set[int] = vertices
        self.edges : dict[EdgeT, int] = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge , weight ):
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
    def prims_algorithm( self ):
        subgraph = Graph({min(self.vertices )} , {} )
        min_edge : EdgeT
        min_weight : int
        edge : EdgeT
        weight : int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def solution( filename : str = "p107_network.txt" ) -> int:
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    network_file = os.path.join(script_dir , filename )
    edges : dict[EdgeT, int] = {}
    with open(network_file ) as f:
        data = f.read().strip().split('''\n''' )
    adjaceny_matrix = [line.split(''',''' ) for line in data]
    for edgea in range(1 , len(adjaceny_matrix ) ):
        for edgeb in range(edgea ):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb] )
    graph = Graph(set(range(len(adjaceny_matrix ) ) ) , edges )
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values() )
    optimal_total = sum(subgraph.edges.values() )
    return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
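# Tiny sanity sketch for the Graph/Prim implementation above: a triangle with
# edge weights 1, 2, 3 has a minimum spanning tree {(0, 1), (1, 2)} of weight 3,
# so the saving over the full network is 6 - 3 = 3.
_tiny = Graph({0, 1, 2} , {(0, 1): 1, (1, 2): 2, (0, 2): 3} )
_mst = _tiny.prims_algorithm()
assert sum(_tiny.edges.values() ) - sum(_mst.edges.values() ) == 3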
| 661 | 1 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=True , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_input_mask
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = initializer_range
_lowerCamelCase = use_labels
_lowerCamelCase = scope
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_input_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def snake_case__ ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def snake_case__ ( self ):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ , ):
_lowerCamelCase = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ , ):
_lowerCamelCase = True
_lowerCamelCase = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , )
_lowerCamelCase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ , ):
_lowerCamelCase = True
_lowerCamelCase = True
_lowerCamelCase = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
# first forward pass
_lowerCamelCase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ , )
_lowerCamelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
_lowerCamelCase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )['''hidden_states'''][0]
_lowerCamelCase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )['''hidden_states'''][0]
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ , ):
_lowerCamelCase = BertGenerationDecoder(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self ):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase__ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase__ : List[Any] = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def snake_case__ ( self ):
_lowerCamelCase = BertGenerationEncoderTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = '''bert'''
        self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ )
def snake_case__ ( self ):
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def snake_case__ ( self ):
_lowerCamelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(lowerCamelCase__ )
@require_torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__ ( self ):
_lowerCamelCase = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
_lowerCamelCase = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
_lowerCamelCase = model(lowerCamelCase__ )[0]
_lowerCamelCase = torch.Size([1, 8, 1_0_2_4] )
self.assertEqual(output.shape , lowerCamelCase__ )
_lowerCamelCase = torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
@require_torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__ ( self ):
_lowerCamelCase = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
_lowerCamelCase = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
_lowerCamelCase = model(lowerCamelCase__ )[0]
_lowerCamelCase = torch.Size([1, 8, 5_0_3_5_8] )
self.assertEqual(output.shape , lowerCamelCase__ )
_lowerCamelCase = torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
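# Toy illustration of the past-key-values equivalence checked in
# create_and_check_decoder_model_past_large_inputs above: decoding with a cache
# must match decoding from scratch. A GRU makes the same point cheaply, its
# hidden state playing the role of the cache.
from torch import nn

_gru = nn.GRU(input_size=4 , hidden_size=4 , batch_first=True ).eval()
_x = torch.randn(1 , 8 , 4 )
with torch.no_grad():
    _full, _ = _gru(_x )                # no cache: the whole sequence at once
    _, _h = _gru(_x[:, :5] )            # run the prefix, keep the "cache"
    _suffix, _ = _gru(_x[:, 5:] , _h )  # continue from the cached state
assert torch.allclose(_full[:, 5:] , _suffix , atol=1e-6 )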
| 661 |
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution( target : int = 2_00_00_00 ) -> int:
    triangle_numbers : list[int] = [0]
    idx : int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product : int = 0
    # the area corresponding to the grid that gives the product closest to target
    area : int = 0
    # an estimate of b, using the quadratic formula
    b_estimate : float
    # the largest integer less than b_estimate
    b_floor : int
    # the smallest integer greater than b_estimate
    b_ceil : int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess : int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess : int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(F"""{solution() = }""")
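# Worked check of the identity the search above relies on: an a x b grid
# contains T(a) * T(b) axis-aligned rectangles, where T(n) = n * (n + 1) / 2.
# For the 3 x 2 grid from the problem statement: T(3) * T(2) = 6 * 3 = 18.
def _rect_count( a : int , b : int ) -> int:
    return (a * (a + 1) // 2) * (b * (b + 1) // 2)
assert _rect_count(3 , 2 ) == 18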
| 661 | 1 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule( FlaxBigBirdForQuestionAnsweringModule ):
'''simple docstring'''
lowercase__ : BigBirdConfig
    lowercase__ : jnp.dtype = jnp.float32
lowercase__ : bool = True
def snake_case__ ( self ):
super().setup()
_lowerCamelCase = nn.Dense(5 , dtype=self.dtype )
def __call__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
_lowerCamelCase = super().__call__(*lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions( FlaxBigBirdForQuestionAnswering ):
    '''simple docstring'''
    module_class = FlaxBigBirdForNaturalQuestionsModule
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[Any] ) -> Tuple:
def cross_entropy(lowercase_ : Dict , lowercase_ : str , lowercase_ : Tuple=None ):
_lowerCamelCase = logits.shape[-1]
_lowerCamelCase = (labels[..., None] == jnp.arange(lowercase_ )[None]).astype('''f4''' )
_lowerCamelCase = jax.nn.log_softmax(lowercase_ , axis=-1 )
_lowerCamelCase = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
_lowerCamelCase = reduction(lowercase_ )
return loss
_lowerCamelCase = partial(lowercase_ , reduction=jnp.mean )
_lowerCamelCase = cross_entropy(lowercase_ , lowercase_ )
_lowerCamelCase = cross_entropy(lowercase_ , lowercase_ )
_lowerCamelCase = cross_entropy(lowercase_ , lowercase_ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
'''simple docstring'''
lowercase__ : str = "google/bigbird-roberta-base"
lowercase__ : int = 3_000
lowercase__ : int = 10_500
lowercase__ : int = 128
lowercase__ : int = 3
lowercase__ : int = 1
lowercase__ : int = 5
# tx_args
lowercase__ : float = 3E-5
lowercase__ : float = 0.0
lowercase__ : int = 20_000
lowercase__ : float = 0.0_095
lowercase__ : str = "bigbird-roberta-natural-questions"
lowercase__ : str = "training-expt"
lowercase__ : str = "data/nq-training.jsonl"
lowercase__ : str = "data/nq-validation.jsonl"
def snake_case__ ( self ):
os.makedirs(self.base_dir , exist_ok=lowerCamelCase__ )
_lowerCamelCase = os.path.join(self.base_dir , self.save_dir )
_lowerCamelCase = self.batch_size_per_device * jax.device_count()
@dataclass
class lowerCamelCase_:
'''simple docstring'''
lowercase__ : int
lowercase__ : int = 4_096 # no dynamic padding on TPUs
def __call__( self , lowerCamelCase__ ):
_lowerCamelCase = self.collate_fn(lowerCamelCase__ )
_lowerCamelCase = jax.tree_util.tree_map(lowerCamelCase__ , lowerCamelCase__ )
return batch
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase , _lowerCamelCase = self.fetch_inputs(features['''input_ids'''] )
_lowerCamelCase = {
            '''input_ids''': jnp.array(lowerCamelCase__ , dtype=jnp.int32 ),
            '''attention_mask''': jnp.array(lowerCamelCase__ , dtype=jnp.int32 ),
            '''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.int32 ),
            '''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.int32 ),
            '''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.int32 ),
}
return batch
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = [self._fetch_inputs(lowerCamelCase__ ) for ids in input_ids]
return zip(*lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = [1 for _ in range(len(lowerCamelCase__ ) )]
while len(lowerCamelCase__ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
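# Minimal sketch of the fixed-length padding performed by the collator above
# (pure Python, no JAX needed): right-pad input_ids with pad_id and extend the
# attention mask with zeros until max_length is reached.
_seq, _pad_id, _max_length = [5, 6, 7], 0, 8
_input_ids = _seq + [_pad_id] * (_max_length - len(_seq ))
_attention_mask = [1] * len(_seq ) + [0] * (_max_length - len(_seq ))
assert _input_ids == [5, 6, 7, 0, 0, 0, 0, 0] and sum(_attention_mask ) == 3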
def lowerCAmelCase_( lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Any=None ) -> Dict:
if seed is not None:
_lowerCamelCase = dataset.shuffle(seed=lowercase_ )
for i in range(len(lowercase_ ) // batch_size ):
_lowerCamelCase = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(lowercase_ )
@partial(jax.pmap , axis_name='''batch''' )
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : Dict , **lowercase_ : Optional[int] ) -> str:
def loss_fn(lowercase_ : Dict ):
_lowerCamelCase = model_inputs.pop('''start_labels''' )
_lowerCamelCase = model_inputs.pop('''end_labels''' )
_lowerCamelCase = model_inputs.pop('''pooled_labels''' )
_lowerCamelCase = state.apply_fn(**lowercase_ , params=lowercase_ , dropout_rng=lowercase_ , train=lowercase_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = outputs
return state.loss_fn(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
_lowerCamelCase , _lowerCamelCase = jax.random.split(lowercase_ )
_lowerCamelCase = jax.value_and_grad(lowercase_ )
_lowerCamelCase , _lowerCamelCase = grad_fn(state.params )
_lowerCamelCase = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
_lowerCamelCase = jax.lax.pmean(lowercase_ , '''batch''' )
_lowerCamelCase = state.apply_gradients(grads=lowercase_ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def lowerCAmelCase_( lowercase_ : List[str] , **lowercase_ : List[Any] ) -> Union[str, Any]:
_lowerCamelCase = model_inputs.pop('''start_labels''' )
_lowerCamelCase = model_inputs.pop('''end_labels''' )
_lowerCamelCase = model_inputs.pop('''pooled_labels''' )
_lowerCamelCase = state.apply_fn(**lowercase_ , params=state.params , train=lowercase_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = outputs
_lowerCamelCase = state.loss_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
_lowerCamelCase = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
return metrics
class TrainState( train_state.TrainState ):
    '''simple docstring'''
    loss_fn : Callable = struct.field(pytree_node=False )
@dataclass
class lowerCamelCase_:
'''simple docstring'''
lowercase__ : Args
lowercase__ : Callable
lowercase__ : Callable
lowercase__ : Callable
lowercase__ : Callable
lowercase__ : wandb
lowercase__ : Callable = None
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
_lowerCamelCase = model.params
_lowerCamelCase = TrainState.create(
apply_fn=model.__call__ , params=lowerCamelCase__ , tx=lowerCamelCase__ , loss_fn=lowerCamelCase__ , )
if ckpt_dir is not None:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = restore_checkpoint(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
_lowerCamelCase , _lowerCamelCase = build_tx(**lowerCamelCase__ )
_lowerCamelCase = train_state.TrainState(
step=lowerCamelCase__ , apply_fn=model.__call__ , params=lowerCamelCase__ , tx=lowerCamelCase__ , opt_state=lowerCamelCase__ , )
_lowerCamelCase = args
_lowerCamelCase = data_collator
_lowerCamelCase = lr
_lowerCamelCase = params
_lowerCamelCase = jax_utils.replicate(lowerCamelCase__ )
return state
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.args
_lowerCamelCase = len(lowerCamelCase__ ) // args.batch_size
_lowerCamelCase = jax.random.PRNGKey(0 )
_lowerCamelCase = jax.random.split(lowerCamelCase__ , jax.device_count() )
for epoch in range(args.max_epochs ):
            _lowerCamelCase = jnp.array(0 , dtype=jnp.float32 )
_lowerCamelCase = get_batched_dataset(lowerCamelCase__ , args.batch_size , seed=lowerCamelCase__ )
_lowerCamelCase = 0
for batch in tqdm(lowerCamelCase__ , total=lowerCamelCase__ , desc=F"""Running EPOCH-{epoch}""" ):
_lowerCamelCase = self.data_collator(lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.train_step_fn(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
_lowerCamelCase = jax_utils.unreplicate(state.step )
_lowerCamelCase = running_loss.item() / i
_lowerCamelCase = self.scheduler_fn(state_step - 1 )
_lowerCamelCase = self.evaluate(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(lowerCamelCase__ ) )
self.logger.log(lowerCamelCase__ , commit=lowerCamelCase__ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = get_batched_dataset(lowerCamelCase__ , self.args.batch_size )
_lowerCamelCase = len(lowerCamelCase__ ) // self.args.batch_size
        _lowerCamelCase = jnp.array(0 , dtype=jnp.float32 )
_lowerCamelCase = 0
for batch in tqdm(lowerCamelCase__ , total=lowerCamelCase__ , desc='''Evaluating ... ''' ):
_lowerCamelCase = self.data_collator(lowerCamelCase__ )
_lowerCamelCase = self.val_step_fn(lowerCamelCase__ , **lowerCamelCase__ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = jax_utils.unreplicate(lowerCamelCase__ )
print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=''' ... ''' )
self.model_save_fn(lowerCamelCase__ , params=state.params )
with open(os.path.join(lowerCamelCase__ , '''opt_state.msgpack''' ) , '''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(lowerCamelCase__ , '''args.joblib''' ) )
joblib.dump(self.data_collator , os.path.join(lowerCamelCase__ , '''data_collator.joblib''' ) )
with open(os.path.join(lowerCamelCase__ , '''training_state.json''' ) , '''w''' ) as f:
json.dump({'''step''': state.step.item()} , lowerCamelCase__ )
print('''DONE''' )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : str ) -> Optional[Any]:
print(F"""RESTORING CHECKPOINT FROM {save_dir}""" , end=''' ... ''' )
with open(os.path.join(lowercase_ , '''flax_model.msgpack''' ) , '''rb''' ) as f:
_lowerCamelCase = from_bytes(state.params , f.read() )
with open(os.path.join(lowercase_ , '''opt_state.msgpack''' ) , '''rb''' ) as f:
_lowerCamelCase = from_bytes(state.opt_state , f.read() )
_lowerCamelCase = joblib.load(os.path.join(lowercase_ , '''args.joblib''' ) )
_lowerCamelCase = joblib.load(os.path.join(lowercase_ , '''data_collator.joblib''' ) )
with open(os.path.join(lowercase_ , '''training_state.json''' ) , '''r''' ) as f:
_lowerCamelCase = json.load(lowercase_ )
_lowerCamelCase = training_state['''step''']
print('''DONE''' )
return params, opt_state, step, args, data_collator
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Tuple ) -> str:
_lowerCamelCase = num_train_steps - warmup_steps
_lowerCamelCase = optax.linear_schedule(init_value=lowercase_ , end_value=lowercase_ , transition_steps=lowercase_ )
_lowerCamelCase = optax.linear_schedule(init_value=lowercase_ , end_value=1e-7 , transition_steps=lowercase_ )
_lowerCamelCase = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def lowerCAmelCase_( lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Tuple ) -> Optional[Any]:
def weight_decay_mask(lowercase_ : Optional[Any] ):
_lowerCamelCase = traverse_util.flatten_dict(lowercase_ )
_lowerCamelCase = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()}
return traverse_util.unflatten_dict(lowercase_ )
_lowerCamelCase = scheduler_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
_lowerCamelCase = optax.adamw(learning_rate=lowercase_ , weight_decay=lowercase_ , mask=lowercase_ )
return tx, lr
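# Sketch of the warmup-then-decay learning-rate schedule assembled above, on
# illustrative numbers (assumes optax is installed): linear warmup from init_lr
# to lr over warmup_steps, then linear decay towards a 1e-7 floor.
_warmup = optax.linear_schedule(init_value=0.0 , end_value=3e-5 , transition_steps=1_0_0 )
_decay = optax.linear_schedule(init_value=3e-5 , end_value=1e-7 , transition_steps=9_0_0 )
_lr_fn = optax.join_schedules(schedules=[_warmup, _decay] , boundaries=[1_0_0] )
print(_lr_fn(0 ) , _lr_fn(1_0_0 ) , _lr_fn(1_0_0_0 ) )  # ~0.0, peak 3e-5, floor 1e-7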
| 661 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class DeiTFeatureExtractor( DeiTImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DeiTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
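# Behavior sketch (assumes transformers with vision extras is installed):
# constructing the deprecated class should emit a FutureWarning while otherwise
# behaving exactly like DeiTImageProcessor, since __init__ forwards all arguments.
with warnings.catch_warnings(record=True ) as _caught:
    warnings.simplefilter('''always''' )
    DeiTFeatureExtractor()
assert any(issubclass(_w.category , FutureWarning ) for _w in _caught )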
| 661 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=3 , lowerCamelCase__=1_8 , lowerCamelCase__=3_0 , lowerCamelCase__=4_0_0 , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=[0.5, 0.5, 0.5] , lowerCamelCase__=[0.5, 0.5, 0.5] , ):
_lowerCamelCase = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = image_size
_lowerCamelCase = min_resolution
_lowerCamelCase = max_resolution
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean
_lowerCamelCase = image_std
def snake_case__ ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Union[str, Any] = DPTImageProcessor if is_vision_available() else None
def snake_case__ ( self ):
_lowerCamelCase = DPTImageProcessingTester(self )
@property
def snake_case__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self ):
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''size''' ) )
def snake_case__ ( self ):
_lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8} )
_lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} )
def snake_case__ ( self ):
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_lowerCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def snake_case__ ( self ):
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_lowerCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def snake_case__ ( self ):
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_lowerCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
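

if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original tests): preprocess a
    # single PIL image with the same 18x18 target size the tests above use
    # (assumes torch and vision dependencies are installed).
    image = Image.new("RGB", (40, 30))
    processor = DPTImageProcessor(size={"height": 18, "width": 18})
    pixel_values = processor(image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])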
| 661 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}


class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved state disagrees with the
        # arguments passed here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
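

if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module); downloads
    # the checkpoint on first use.
    tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
    encoding = tokenizer("Hello world!")
    print(encoding.input_ids)  # [CLS] ... [SEP] ids
    # DistilBERT has no segment embeddings, so the helper above returns zeros.
    print(tokenizer.create_token_type_ids_from_sequences(encoding.input_ids[1:-1]))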
| 661 | 1 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        # note: no `test_` prefix, so this helper is only run manually
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
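

if __name__ == "__main__":
    # Illustrative sketch (not part of the original tests): the offset behavior
    # checked above, shown on a live checkpoint (requires network access).
    tok = LongformerTokenizerFast.from_pretrained(
        "allenai/longformer-base-4096", add_prefix_space=True, trim_offsets=True
    )
    enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
    print(enc.offset_mapping)  # expected [(0, 5), (6, 11)]: offsets exclude the separating space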
| 661 |
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print('''Example of four vector: ''')
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
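
    # Illustrative numeric example (not in the original): boost the event
    # (t = 0 s, x = 1 m, y = 0, z = 0) at half the speed of light.
    boosted = transform(0.5 * c, np.array([0.0, 1.0, 0.0, 0.0]))
    print(f"\nBoosted event at v = 0.5c: {boosted}")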
| 661 | 1 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on some audio from AudioSet
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
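

if __name__ == "__main__":
    # Illustrative inference sketch (not part of the original tests): mirrors
    # the integration test above; requires network access and torchaudio.
    extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
    model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
    waveform, sampling_rate = prepare_audio()
    inputs = extractor(waveform.squeeze().numpy(), sampling_rate=sampling_rate, return_tensors="pt")
    with torch.no_grad():
        predicted_id = model(**inputs).logits.argmax(-1).item()
    print(model.config.id2label[predicted_id])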
| 661 |
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


# The function name below is descriptive (the original was stripped): given any
# two of conductivity, electron concentration, and mobility (the unknown passed
# as 0), solve for the third.
def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
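
    # Illustrative call (not in the original): with conductivity set to 0 as
    # the unknown, solve for it from electron concentration and mobility.
    print(electric_conductivity(conductivity=0, electron_conc=1e20, mobility=0.01))
    # -> ('conductivity', 0.16021)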
| 661 | 1 |