code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__magic_name__ : Dict = logging.get_logger(__name__)
__magic_name__ : str = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__magic_name__ : Tuple = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
__magic_name__ : Dict = {
"facebook/blenderbot_small-90M": 5_12,
}
class SCREAMING_SNAKE_CASE__ (lowercase__ ):
lowercase_ : List[str] = VOCAB_FILES_NAMES
lowercase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : List[Any] = BlenderbotSmallTokenizer
def __init__( self : Optional[int] , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Union[str, Any]="<|endoftext|>" , __lowerCamelCase : Dict="<|endoftext|>" , __lowerCamelCase : Any="<|endoftext|>" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : Any=True , **__lowerCamelCase : Any , ):
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=__lowerCamelCase , merges=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , ) , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , **__lowerCamelCase , )
lowerCAmelCase__ = add_prefix_space
def A__ ( self : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict=None ):
"""simple docstring"""
lowerCAmelCase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A__ ( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] = None ):
"""simple docstring"""
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 615 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=9_9 , snake_case=6_4 , snake_case=5 , snake_case=4 , snake_case=6_4 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
'''simple docstring'''
UpperCAmelCase : List[Any] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : int = seq_length
UpperCAmelCase : Dict = is_training
UpperCAmelCase : Optional[Any] = use_input_mask
UpperCAmelCase : Optional[Any] = use_token_type_ids
UpperCAmelCase : Optional[Any] = use_labels
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[str] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : int = hidden_dropout_prob
UpperCAmelCase : Tuple = attention_probs_dropout_prob
UpperCAmelCase : Any = max_position_embeddings
UpperCAmelCase : Tuple = type_vocab_size
UpperCAmelCase : Union[str, Any] = type_sequence_label_size
UpperCAmelCase : int = initializer_range
UpperCAmelCase : Dict = num_labels
UpperCAmelCase : Union[str, Any] = num_choices
UpperCAmelCase : List[Any] = scope
def A_ ( self ):
'''simple docstring'''
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Any = None
if self.use_input_mask:
UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : str = None
UpperCAmelCase : Dict = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self ):
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MPNetModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Dict = model(snake_case , snake_case )
UpperCAmelCase : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : int = MPNetForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Dict = model(
snake_case , attention_mask=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Optional[int] = MPNetForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Optional[int] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.num_choices
UpperCAmelCase : Optional[int] = MPNetForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Tuple = model(
snake_case , attention_mask=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Tuple = MPNetForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : str = config_and_inputs
UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Any = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : str = True
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MPNetModelTester(self )
UpperCAmelCase : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=3_7 )
def A_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*snake_case )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = MPNetModel.from_pretrained("microsoft/mpnet-base" )
UpperCAmelCase : Optional[int] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
UpperCAmelCase : Optional[Any] = model(snake_case )[0]
UpperCAmelCase : Optional[int] = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , snake_case )
UpperCAmelCase : Optional[Any] = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
| 679 | 0 |
from __future__ import annotations
_lowerCAmelCase : Tuple = 1.6021E-19 # units = C
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : Any , _snake_case : Any , ):
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
raise ValueError('mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 242 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
a : Optional[Any] = logging.get_logger(__name__)
a : List[str] = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
if tokenizer_name is None:
UpperCAmelCase : List[str] = TOKENIZER_CLASSES
else:
UpperCAmelCase : int = {tokenizer_name: getattr(__magic_name__ , tokenizer_name + "Fast" )}
logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
for tokenizer_name in tokenizer_names:
UpperCAmelCase : Tuple = TOKENIZER_CLASSES[tokenizer_name]
UpperCAmelCase : Union[str, Any] = True
if checkpoint_name is None:
UpperCAmelCase : List[str] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
UpperCAmelCase : Dict = [checkpoint_name]
logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
for checkpoint in checkpoint_names:
logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
# Load tokenizer
UpperCAmelCase : Union[str, Any] = tokenizer_class.from_pretrained(__magic_name__ , force_download=__magic_name__ )
# Save fast tokenizer
logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
# For organization names we create sub-directories
if "/" in checkpoint:
UpperCAmelCase , UpperCAmelCase : Dict = checkpoint.split("/" )
UpperCAmelCase : Optional[int] = os.path.join(__magic_name__ , __magic_name__ )
elif add_prefix:
UpperCAmelCase : List[Any] = checkpoint
UpperCAmelCase : str = dump_path
else:
UpperCAmelCase : List[str] = None
UpperCAmelCase : List[Any] = dump_path
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
UpperCAmelCase : List[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
UpperCAmelCase : List[Any] = file_path.split(__magic_name__ )[-1][0]
if next_char == "/":
UpperCAmelCase : str = os.path.join(__magic_name__ , __magic_name__ )
UpperCAmelCase : Dict = None
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
UpperCAmelCase : Any = tokenizer.save_pretrained(
__magic_name__ , legacy_format=__magic_name__ , filename_prefix=__magic_name__ )
logger.info(F"=> File names {file_names}" )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(__magic_name__ )
logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
a : Any = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 679 | 0 |
from math import pi, sqrt
def __a ( A__ : Optional[Any] ):
if num <= 0:
raise ValueError("math domain error" )
if num > 171.5:
raise OverflowError("math range error" )
elif num - int(A__ ) not in (0, 0.5):
raise NotImplementedError("num must be an integer or a half-integer" )
elif num == 0.5:
return sqrt(A__ )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def __a ( ):
assert gamma(0.5 ) == sqrt(A__ )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
__A : List[Any] = 1.0
while num:
__A : str = float(input('Gamma of: '))
print(f'gamma({num}) = {gamma(num)}')
print('\nEnter 0 to exit...') | 16 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = "dandelin/vilt-b32-finetuned-vqa"
SCREAMING_SNAKE_CASE__ : Dict = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
SCREAMING_SNAKE_CASE__ : List[str] = "image_qa"
SCREAMING_SNAKE_CASE__ : int = AutoProcessor
SCREAMING_SNAKE_CASE__ : Tuple = AutoModelForVisualQuestionAnswering
SCREAMING_SNAKE_CASE__ : Any = ["image", "text"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["text"]
def __init__( self , *snake_case , **snake_case ):
'''simple docstring'''
requires_backends(self , ["vision"] )
super().__init__(*snake_case , **snake_case )
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
return self.pre_processor(snake_case , snake_case , return_tensors="pt" )
def A_ ( self , snake_case ):
'''simple docstring'''
with torch.no_grad():
return self.model(**snake_case ).logits
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Any = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowercase : List[str] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : int = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 542 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a : Optional[int] = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = R"\w+[.]\d+"
UpperCAmelCase : Dict = re.findall(__magic_name__ , __magic_name__ )
for pat in pats:
UpperCAmelCase : Tuple = key.replace(__magic_name__ , "_".join(pat.split("." ) ) )
return key
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
UpperCAmelCase : Tuple = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
UpperCAmelCase : Optional[int] = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
UpperCAmelCase : Dict = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase : Tuple = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
UpperCAmelCase : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase : int = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
UpperCAmelCase : Union[str, Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase : Union[str, Any] = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase : Optional[int] = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__=42 ):
'''simple docstring'''
UpperCAmelCase : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
UpperCAmelCase : Tuple = flax_model.init_weights(PRNGKey(__magic_name__ ) )
UpperCAmelCase : Optional[Any] = flatten_dict(__magic_name__ )
UpperCAmelCase : List[str] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase : Tuple = rename_key(__magic_name__ )
UpperCAmelCase : List[str] = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
UpperCAmelCase , UpperCAmelCase : Optional[int] = rename_key_and_reshape_tensor(__magic_name__ , __magic_name__ , __magic_name__ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
UpperCAmelCase : Optional[int] = jnp.asarray(__magic_name__ )
return unflatten_dict(__magic_name__ )
| 679 | 0 |
from __future__ import annotations
import pandas as pd
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
__lowerCAmelCase = [0] * no_of_processes
__lowerCAmelCase = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(__snake_case ):
__lowerCAmelCase = burst_time[i]
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 9_9999_9999
__lowerCAmelCase = 0
__lowerCAmelCase = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(__snake_case ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
__lowerCAmelCase = remaining_time[j]
__lowerCAmelCase = j
__lowerCAmelCase = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
__lowerCAmelCase = remaining_time[short]
if minm == 0:
__lowerCAmelCase = 9_9999_9999
if remaining_time[short] == 0:
complete += 1
__lowerCAmelCase = False
# Find finish time of current process
__lowerCAmelCase = increment_time + 1
# Calculate waiting time
__lowerCAmelCase = finish_time - arrival_time[short]
__lowerCAmelCase = finar - burst_time[short]
if waiting_time[short] < 0:
__lowerCAmelCase = 0
# Increment time
increment_time += 1
return waiting_time
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
__lowerCAmelCase = [0] * no_of_processes
for i in range(__snake_case ):
__lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
__lowerCAmelCase = 0
__lowerCAmelCase = 0
for i in range(__snake_case ):
__lowerCAmelCase = total_waiting_time + waiting_time[i]
__lowerCAmelCase = total_turn_around_time + turn_around_time[i]
print(F"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
print("Average turn around time =" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
lowerCamelCase : Dict = int(input())
lowerCamelCase : Dict = [0] * no_of_processes
lowerCamelCase : Any = [0] * no_of_processes
lowerCamelCase : str = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
lowerCamelCase : List[Any] = map(int, input().split())
lowerCamelCase : int = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase : Any = burst_time
lowerCamelCase : List[str] = no_of_processes
lowerCamelCase : List[str] = waiting_time
lowerCamelCase : Optional[int] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
lowerCamelCase : Dict = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 367 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = (EulerDiscreteScheduler,)
SCREAMING_SNAKE_CASE__ : List[Any] = 10
def A_ ( self , **snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = {
"num_train_timesteps": 1_1_0_0,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**snake_case )
return config
def A_ ( self ):
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case )
def A_ ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case , beta_end=snake_case )
def A_ ( self ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=snake_case )
def A_ ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
UpperCAmelCase : Union[str, Any] = self.get_scheduler_config()
UpperCAmelCase : Optional[Any] = scheduler_class(**snake_case )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = self.dummy_model()
UpperCAmelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase : Any = sample.to(snake_case )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase : Tuple = scheduler.scale_model_input(snake_case , snake_case )
UpperCAmelCase : List[Any] = model(snake_case , snake_case )
UpperCAmelCase : str = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
UpperCAmelCase : Dict = output.prev_sample
UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(snake_case ) )
UpperCAmelCase : List[Any] = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase : int = self.get_scheduler_config(prediction_type="v_prediction" )
UpperCAmelCase : List[Any] = scheduler_class(**snake_case )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase : List[Any] = torch.manual_seed(0 )
UpperCAmelCase : Dict = self.dummy_model()
UpperCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase : int = sample.to(snake_case )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase : str = scheduler.scale_model_input(snake_case , snake_case )
UpperCAmelCase : Dict = model(snake_case , snake_case )
UpperCAmelCase : List[Any] = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
UpperCAmelCase : Any = output.prev_sample
UpperCAmelCase : Optional[int] = torch.sum(torch.abs(snake_case ) )
UpperCAmelCase : Any = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase : Optional[int] = self.get_scheduler_config()
UpperCAmelCase : Any = scheduler_class(**snake_case )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case )
UpperCAmelCase : List[Any] = torch.manual_seed(0 )
UpperCAmelCase : int = self.dummy_model()
UpperCAmelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase : str = sample.to(snake_case )
for t in scheduler.timesteps:
UpperCAmelCase : Union[str, Any] = scheduler.scale_model_input(snake_case , snake_case )
UpperCAmelCase : List[Any] = model(snake_case , snake_case )
UpperCAmelCase : List[str] = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
UpperCAmelCase : Dict = output.prev_sample
UpperCAmelCase : Optional[int] = torch.sum(torch.abs(snake_case ) )
UpperCAmelCase : Any = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def A_ ( self ):
    """Full denoising loop with Karras sigmas enabled.

    NOTE(review): same obfuscation damage as the test above;
    ``use_karras_sigmas=snake_case`` is presumably ``use_karras_sigmas=True``
    -- TODO confirm against the original scheduler test. Code left
    byte-identical.
    """
    UpperCAmelCase : Dict = self.scheduler_classes[0]
    UpperCAmelCase : Tuple = self.get_scheduler_config()
    UpperCAmelCase : Dict = scheduler_class(**snake_case , use_karras_sigmas=snake_case )
    scheduler.set_timesteps(self.num_inference_steps , device=snake_case )
    UpperCAmelCase : List[str] = torch.manual_seed(0 )
    UpperCAmelCase : Any = self.dummy_model()
    UpperCAmelCase : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
    UpperCAmelCase : List[str] = sample.to(snake_case )
    for t in scheduler.timesteps:
        UpperCAmelCase : str = scheduler.scale_model_input(snake_case , snake_case )
        UpperCAmelCase : Dict = model(snake_case , snake_case )
        UpperCAmelCase : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
        UpperCAmelCase : List[str] = output.prev_sample
    UpperCAmelCase : int = torch.sum(torch.abs(snake_case ) )
    UpperCAmelCase : Any = torch.mean(torch.abs(snake_case ) )
    assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2
    assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
| 679 | 0 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
# NOTE(review): the four module constants were all bound to one obfuscated
# name, which left VALID_CHARS / VALID_INTS / COMMON_WORDS (all referenced
# below) undefined. The names are restored from those use sites.
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
# Candidate key characters as ints; presumably named LOWERCASE_INTS -- TODO confirm.
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
# Fast membership set used by the decoder to reject impossible plaintexts.
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
# English words used to rank candidate plaintexts.
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
# Preserve the previous (last-surviving) obfuscated binding for compatibility.
lowercase = COMMON_WORDS
def lowerCAmelCase__ ( key , ciphertext ):
    """Decode ``ciphertext`` (a sequence of ints, XOR cipher) with the
    repeating ``key``.

    Returns the decoded string, or ``None`` as soon as a decoded character
    falls outside ``VALID_INTS``.

    NOTE(review): the original declared two parameters both named ``_a`` (a
    SyntaxError) and bound the XOR result to a throwaway name while testing
    ``decodedchar`` -- names restored from the use sites.
    """
    decoded = ""
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def lowerCAmelCase__ ( _a ):
    """Return every decoding of ``_a`` (the ciphertext ints) whose characters
    are all valid, over all 3-int candidate keys.

    NOTE(review): ``product(_a, repeat=3)`` draws key characters from the
    ciphertext itself; presumably it should iterate the lowercase-letter ints
    (Project Euler 59 uses a 3 lowercase-letter key) -- TODO confirm. The
    ``encoded`` binding and the appended value are restored from the
    ``if encoded is not None`` test, which the obfuscated code broke.
    """
    possibles = []
    for key in product(_a , repeat=3 ):
        encoded = try_key(key , _a )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def lowerCAmelCase__ ( possibles , common_word ):
    """Keep only candidate plaintexts that contain ``common_word``
    (case-insensitively).

    NOTE(review): both parameters were named ``_a`` in the obfuscated original
    (a SyntaxError); the names are restored from the identifiers the body
    already used.
    """
    return [possible for possible in possibles if common_word in possible.lower()]
def lowerCAmelCase__ ( _a = "p059_cipher.txt" ):
    """Project Euler 59: decrypt the XOR-encrypted file and return the sum of
    the ASCII values of the decrypted text.

    NOTE(review): every local binding was destroyed by obfuscation; the names
    are restored from the use sites below. ``Path(__file__).parent`` is the
    presumed base directory for the data file -- TODO confirm.
    """
    data = Path(__file__ ).parent.joinpath(_a ).read_text(encoding="utf-8" )
    ciphertext = [int(number ) for number in data.strip().split("," )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this module -- the
    # functions above were all renamed to ``lowerCAmelCase__`` by obfuscation;
    # verify the intended entry-point name.
    print(F"""{solution() = }""")
| 568 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCamelCase__ ( TestCase ):
    """Repo-hygiene checks over ``./datasets``: every ``open()`` call must use
    an explicit encoding (or a binary/write mode), and dataset scripts must
    not use bare ``print``.

    NOTE(review): helper names are restored from the ``self._no_encoding_on_file_open``
    / ``self._no_print_statements`` call sites; the base class is restored to
    the otherwise-unused ``TestCase`` import; the two ``test_*`` names follow
    pytest convention -- confirm against the original file. The obfuscated
    code also searched the file *path* instead of the file *contents*.
    """

    def _no_encoding_on_file_open(self, filepath):
        """Return a match if *filepath* contains an ``open(...)`` call with
        neither an explicit encoding nor a binary/write mode, else ``None``."""
        with open(filepath, encoding="utf-8" ) as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match

    def _no_print_statements(self, filepath):
        """Return a match for a real ``print(`` call in *filepath* (commented
        or quoted prints are ignored), else ``None``."""
        with open(filepath, encoding="utf-8" ) as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        """Fail if any dataset script opens a file without an explicit encoding."""
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}" )

    def test_no_print_statements(self):
        """Fail if any dataset script contains a bare ``print`` statement."""
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 679 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
# Type variable for the generic containers below; restored to the name ``T``
# that every ``Generic[T]`` reference in this module expects.
T = TypeVar("T")
lowerCAmelCase_ = T  # backward-compatible alias for the previous binding


class DisjointSetTreeNode(Generic[T]):
    """Node of a disjoint-set forest: payload, parent link and union rank.

    NOTE(review): the class name is restored from the ``DisjointSetTreeNode(...)``
    call site below; the obfuscated ``__init__`` bound the attributes to
    throwaway locals -- the names are restored from the readers (``.data``,
    ``.parent``, ``.rank``).
    """

    def __init__(self, data: T) -> None:
        self.data = data
        # Each node starts as its own set representative with rank 0.
        self.parent = self
        self.rank = 0


_snake_case = DisjointSetTreeNode  # backward-compatible alias
class DisjointSetTree(Generic[T]):
    """Disjoint-set forest (union-find) with path compression and union by
    rank.

    NOTE(review): the class and method names are restored from their call
    sites (``DisjointSetTree[T]()``, ``make_set``, ``find_set``, ``link``,
    ``union``); the obfuscated original had duplicate parameter names (a
    SyntaxError) and lost the ``self.map`` / ``elem_ref`` bindings.
    """

    def __init__(self) -> None:
        # data -> node holding it
        self.map = {}

    def make_set(self, data: T) -> None:
        """Create a new singleton set containing ``data``."""
        self.map[data] = DisjointSetTreeNode(data )

    def find_set(self, data: T):
        """Return the representative node of ``data``'s set, compressing the
        path as we go."""
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent

    def link(self, nodea, nodeb) -> None:
        """Attach the lower-rank root under the higher-rank root (union by rank)."""
        if nodea.rank > nodeb.rank:
            nodeb.parent = nodea
        else:
            nodea.parent = nodeb
            if nodea.rank == nodeb.rank:
                nodeb.rank += 1

    def union(self, dataa: T, datab: T) -> None:
        """Merge the sets containing ``dataa`` and ``datab``."""
        self.link(self.find_set(dataa ) , self.find_set(datab ) )


_snake_case = DisjointSetTree  # backward-compatible alias
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph with a Kruskal minimum-spanning-tree builder.

    NOTE(review): class/method names are restored from their call sites
    (``GraphUndirectedWeighted[T]()``, ``self.add_node``, ``graph.add_edge``);
    the obfuscated original lost the local bindings and its sort key lambda
    referenced an undefined name. ``kruskal`` itself is unreferenced here --
    confirm the intended public name.
    """

    def __init__(self) -> None:
        # adjacency map: node -> {neighbour: weight}
        self.connections = {}

    def add_node(self, node: T) -> None:
        """Ensure ``node`` exists in the adjacency map."""
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, nodea: T, nodeb: T, weight: int) -> None:
        """Add an undirected edge of the given weight between the two nodes."""
        self.add_node(nodea )
        self.add_node(nodeb )
        self.connections[nodea][nodeb] = weight
        self.connections[nodeb][nodea] = weight

    def kruskal(self):
        """Return the minimum spanning tree as a new graph (Kruskal)."""
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        # BUG FIX: the sort key referenced an undefined name; sort edges by weight.
        edges.sort(key=lambda edge: edge[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph


_snake_case = GraphUndirectedWeighted  # backward-compatible alias
| 531 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
# Module-level logger (the original binding name was lost to obfuscation).
a : logging.Logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    """``BertEncoder`` that can run a single encoder layer at a time, as
    required by the PABEE (patience-based early exit) inference loop.

    NOTE(review): the class name is restored from the ``BertEncoderWithPabee(...)``
    call site below; the base is restored to the otherwise-unused
    ``BertEncoder`` import (which provides ``self.layer``); the obfuscated
    method had duplicate parameter names (a SyntaxError) -- the method and
    parameter names are restored from the keyword call sites
    (``adaptive_forward(..., current_layer=..., attention_mask=..., head_mask=...)``).
    """

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """Run only ``self.layer[current_layer]`` and return its hidden states."""
        layer_outputs = self.layer[current_layer](hidden_states , attention_mask , head_mask[current_layer] )
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
    """BERT model with PABEE (Patience-based Adaptive Early Exit) inference.

    NOTE(review): this class was machine-obfuscated -- method names collapsed
    to ``A_``, the ``forward``-like method repeats the parameter name
    ``snake_case`` (a SyntaxError as written), and locals were rebound to
    ``UpperCAmelCase`` while their uses keep the original names. Code is left
    byte-identical; comments describe the apparent intent and should be
    verified against the original PABEE example (``modeling_pabee_bert.py``).
    """

    def __init__( self , snake_case ):
        """Build the PABEE-aware encoder and zero the early-exit counters."""
        super().__init__(snake_case )
        # presumably: self.encoder = BertEncoderWithPabee(config) -- TODO confirm
        UpperCAmelCase : Dict = BertEncoderWithPabee(snake_case )
        self.init_weights()
        # presumably the patience / regression-threshold / inference counters
        UpperCAmelCase : int = 0
        UpperCAmelCase : Dict = 0
        UpperCAmelCase : Optional[int] = 0
        UpperCAmelCase : List[Any] = 0

    def A_ ( self , snake_case ):
        """Setter -- apparently binds the regression early-exit threshold."""
        UpperCAmelCase : List[Any] = threshold

    def A_ ( self , snake_case ):
        """Setter -- apparently binds the patience hyper-parameter."""
        UpperCAmelCase : str = patience

    def A_ ( self ):
        """Apparently resets the inference statistics counters."""
        UpperCAmelCase : Dict = 0
        UpperCAmelCase : List[Any] = 0

    def A_ ( self ):
        """Print the average number of executed layers and the speed-up."""
        UpperCAmelCase : Dict = self.inference_layers_num / self.inference_instances_num
        UpperCAmelCase : List[Any] = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(snake_case )

    @add_start_docstrings_to_model_forward(snake_case )
    def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=False , ):
        """Forward pass with patience-based early exit.

        Training runs every layer and collects per-layer classifier outputs;
        with ``patience == 0`` all layers run at inference; otherwise layers
        run one at a time and inference stops once the per-layer prediction is
        stable for ``patience`` consecutive layers.
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            UpperCAmelCase : Dict = input_ids.size()
        elif inputs_embeds is not None:
            UpperCAmelCase : Any = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        UpperCAmelCase : Optional[int] = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            UpperCAmelCase : Tuple = torch.ones(snake_case , device=snake_case )
        if token_type_ids is None:
            UpperCAmelCase : List[Any] = torch.zeros(snake_case , dtype=torch.long , device=snake_case )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        UpperCAmelCase : torch.Tensor = self.get_extended_attention_mask(snake_case , snake_case , snake_case )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = encoder_hidden_states.size()
            UpperCAmelCase : List[str] = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                UpperCAmelCase : int = torch.ones(snake_case , device=snake_case )
            UpperCAmelCase : str = self.invert_attention_mask(snake_case )
        else:
            UpperCAmelCase : int = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        UpperCAmelCase : Dict = self.get_head_mask(snake_case , self.config.num_hidden_layers )
        UpperCAmelCase : Tuple = self.embeddings(
            input_ids=snake_case , position_ids=snake_case , token_type_ids=snake_case , inputs_embeds=snake_case )
        UpperCAmelCase : int = embedding_output
        if self.training:
            UpperCAmelCase : int = []
            for i in range(self.config.num_hidden_layers ):
                UpperCAmelCase : List[Any] = self.encoder.adaptive_forward(
                    snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
                UpperCAmelCase : Dict = self.pooler(snake_case )
                UpperCAmelCase : List[Any] = output_layers[i](output_dropout(snake_case ) )
                res.append(snake_case )
        elif self.patience == 0: # Use all layers for inference
            UpperCAmelCase : Union[str, Any] = self.encoder(
                snake_case , attention_mask=snake_case , head_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
            UpperCAmelCase : Optional[int] = self.pooler(encoder_outputs[0] )
            UpperCAmelCase : List[str] = [output_layers[self.config.num_hidden_layers - 1](snake_case )]
        else:
            UpperCAmelCase : int = 0
            UpperCAmelCase : Optional[Any] = None
            UpperCAmelCase : Optional[Any] = 0
            for i in range(self.config.num_hidden_layers ):
                calculated_layer_num += 1
                UpperCAmelCase : Tuple = self.encoder.adaptive_forward(
                    snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
                UpperCAmelCase : Any = self.pooler(snake_case )
                UpperCAmelCase : int = output_layers[i](snake_case )
                if regression:
                    UpperCAmelCase : Optional[Any] = logits.detach()
                    if patient_result is not None:
                        UpperCAmelCase : Union[str, Any] = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        UpperCAmelCase : Optional[Any] = 0
                else:
                    UpperCAmelCase : Any = logits.detach().argmax(dim=1 )
                    if patient_result is not None:
                        UpperCAmelCase : Tuple = patient_result.detach().argmax(dim=1 )
                    if (patient_result is not None) and torch.all(labels.eq(snake_case ) ):
                        patient_counter += 1
                    else:
                        UpperCAmelCase : str = 0
                UpperCAmelCase : int = logits
                if patient_counter == self.patience:
                    break
            UpperCAmelCase : int = [patient_result]
        self.inference_layers_num += calculated_layer_num
        self.inference_instances_num += 1
        return res
@add_start_docstrings(
    "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
    """PABEE BERT with one classification head per encoder layer; the training
    loss is a layer-weighted sum over all heads.

    NOTE(review): obfuscation damage as above -- the forward method repeats
    the parameter name ``snake_case`` (a SyntaxError as written) and locals
    are rebound to ``UpperCAmelCase`` while their uses keep the original
    names. Code left byte-identical.
    """

    def __init__( self , snake_case ):
        """Build the PABEE backbone plus one linear classifier per layer."""
        super().__init__(snake_case )
        UpperCAmelCase : Union[str, Any] = config.num_labels
        UpperCAmelCase : Optional[Any] = BertModelWithPabee(snake_case )
        UpperCAmelCase : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
        # One classification head per hidden layer for per-layer early exit.
        UpperCAmelCase : Any = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
        self.init_weights()

    @add_start_docstrings_to_model_forward(snake_case )
    def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , ):
        """Forward pass: returns the last head's logits; when labels are given
        a layer-weighted loss over all heads is expected to be prepended."""
        UpperCAmelCase : int = self.bert(
            input_ids=snake_case , attention_mask=snake_case , token_type_ids=snake_case , position_ids=snake_case , head_mask=snake_case , inputs_embeds=snake_case , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
        UpperCAmelCase : Tuple = (logits[-1],)
        if labels is not None:
            UpperCAmelCase : Optional[int] = None
            UpperCAmelCase : List[Any] = 0
            for ix, logits_item in enumerate(snake_case ):
                if self.num_labels == 1:
                    # We are doing regression
                    UpperCAmelCase : Dict = MSELoss()
                    UpperCAmelCase : Union[str, Any] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
                else:
                    UpperCAmelCase : Optional[int] = CrossEntropyLoss()
                    UpperCAmelCase : Tuple = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
                if total_loss is None:
                    UpperCAmelCase : int = loss
                else:
                    # Deeper layers get a larger weight (ix + 1).
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            UpperCAmelCase : Tuple = (total_loss / total_weights,) + outputs
        return outputs
| 679 | 0 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Convert a pretrained UnCLIP txt2img pipeline into an image-variation
    # pipeline by reusing its components, then save it to --dump_path.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    args = parser.parse_args()

    # BUG FIX: argparse stores `--txt2img_unclip` as `args.txt2img_unclip`;
    # the original read a misspelled attribute. Local bindings are restored
    # from their use sites (`parser`, `args`, `txtaimg`, `imgaimg`, ...).
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    imgaimg = UnCLIPImageVariationPipeline(
        decoder=txtaimg.decoder,
        text_encoder=txtaimg.text_encoder,
        tokenizer=txtaimg.tokenizer,
        text_proj=txtaimg.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txtaimg.super_res_first,
        super_res_last=txtaimg.super_res_last,
        decoder_scheduler=txtaimg.decoder_scheduler,
        super_res_scheduler=txtaimg.super_res_scheduler,
    )
    imgaimg.save_pretrained(args.dump_path)
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(__magic_name__):
    """Exact Gaussian Error Linear Unit: ``x * Phi(x)`` via ``erf``.

    NOTE(review): local names (``x``, ``cdf``) are restored from the use
    sites; the function is renamed ``_gelu`` to match the references below
    (``gelu_10``'s body and the TF<2.4 fallback assignments).
    """
    x = tf.convert_to_tensor(__magic_name__ )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def _gelu_new(__magic_name__):
    """Tanh approximation of GELU (the "new" GELU used by GPT-2).

    NOTE(review): local names restored from the use sites; function renamed
    to ``_gelu_new``, the name the TF<2.4 fallback assignment references.
    """
    x = tf.convert_to_tensor(__magic_name__ )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def mish(__magic_name__):
    """Mish activation: ``x * tanh(softplus(x))``.

    NOTE(review): renamed to ``mish``, the name the ACT2FN mapping below
    references; the converted tensor ``x`` is reused inside ``softplus``.
    """
    x = tf.convert_to_tensor(__magic_name__ )
    return x * tf.tanh(tf.math.softplus(x ) )
def gelu_fast(__magic_name__):
    """Fast tanh-based GELU approximation.

    NOTE(review): renamed to ``gelu_fast`` per the ACT2FN mapping below. The
    obfuscated return reused one coefficient twice; restored per the standard
    fast-GELU formula -- 0.7978845608 (~sqrt(2/pi)) scales x inside tanh and
    0.044715 scales the cubic term.
    """
    x = tf.convert_to_tensor(__magic_name__ )
    coeffa = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    coeffb = tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeffb * (1.0 + coeffa * x * x) ))
def quick_gelu(__magic_name__):
    """Sigmoid-based GELU approximation: ``x * sigmoid(1.702 * x)``.

    NOTE(review): renamed to ``quick_gelu`` per the ACT2FN mapping; locals
    restored from the use sites.
    """
    x = tf.convert_to_tensor(__magic_name__ )
    coeff = tf.cast(1.7_0_2 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def gelu_aa(__magic_name__):
    """GELU clipped to [-10, 10] (the ``gelu_10`` activation).

    NOTE(review): renamed to ``gelu_aa``, the name the ACT2FN mapping below
    references for the "gelu_10" key.
    """
    return tf.clip_by_value(_gelu(__magic_name__ ) , -10 , 10 )
def glu(__magic_name__, axis=-1):
    """Gated Linear Unit: split the input in two along ``axis`` and gate the
    first half with the sigmoid of the second.

    NOTE(review): the obfuscated original declared two parameters with the
    same name (a SyntaxError) and left ``a`` unbound; ``axis`` and the
    two-way unpack are restored from ``tf.split``'s usage and the return
    expression. Renamed ``glu`` per the ACT2FN mapping below.
    """
    a, b = tf.split(__magic_name__ , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
# Pick native Keras gelu on TF >= 2.4, otherwise fall back to the local
# implementations. Binding names (``gelu``, ``gelu_new``) are restored from
# the ACT2FN mapping below.
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(__magic_name__):
        """Keras gelu with the tanh approximation enabled (matches ``_gelu_new``)."""
        # NOTE(review): ``approximate`` must be the boolean True, not the input
        # tensor the obfuscated original passed.
        return tf.keras.activations.gelu(__magic_name__ , approximate=True )

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
# Name -> activation mapping; the binding is restored to ``ACTaFN``, the name
# the lookup function below actually reads.
ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def lowercase(activation_string):
    """Look up an activation function by name in ``ACTaFN``.

    Raises ``KeyError`` with the list of known names otherwise.

    NOTE(review): the parameter name is restored from the body's references
    (the obfuscated signature used a different name than the body).
    """
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds small ResNet configs/inputs for the TF model tests below.

    NOTE(review): class, method, parameter and attribute names are restored
    from their use sites (``TFResNetModelTester(self)``,
    ``self.model_tester.prepare_config_and_inputs()``, the right-hand sides of
    the corrupted assignments, and the ``self.*`` readers). The obfuscated
    ``__init__`` repeated one parameter name (a SyntaxError) and bound every
    attribute to a throwaway local.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in `depths`; read by the hidden-states test below.
        self.num_stages = len(depths )

    def prepare_config_and_inputs(self):
        """Return a (config, pixel_values, labels) triple of small test inputs."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Return a tiny ResNetConfig matching the tester's hyper-parameters."""
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run the bare model and check the last hidden state's shape."""
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Run the classification head and check the logits shape."""
        # presumably config.num_labels is set from the tester -- TODO confirm
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) for the shared ModelTesterMixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCamelCase_ ( lowercase__ , lowercase__ , unittest.TestCase ):
    """Common-API test suite for the TF ResNet models.

    NOTE(review): obfuscation damage -- the test-method names collapsed to
    ``__a``, the assignments that should set ``self.model_tester`` /
    ``self.config_tester`` bind throwaway locals, and the nested
    ``check_hidden_states_output`` signature repeats one parameter name (a
    SyntaxError as written). Code left byte-identical.
    """
    _a : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    _a : Optional[int] = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # The five flags below presumably disable attention/embedding-related
    # common tests for this CNN architecture -- TODO confirm their names.
    _a : Dict = False
    _a : int = False
    _a : Tuple = False
    _a : Optional[Any] = False
    _a : Union[str, Any] = False

    def __a ( self : Union[str, Any] ):
        # presumably: self.model_tester = ...; self.config_tester = ... -- TODO confirm
        lowerCamelCase_ : Dict = TFResNetModelTester(self )
        lowerCamelCase_ : List[Any] = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )

    def __a ( self : Dict ):
        """Run the full battery of config round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __a ( self : Union[str, Any] ):
        return

    @unittest.skip(reason='ResNet does not use inputs_embeds' )
    def __a ( self : List[Any] ):
        pass

    @unittest.skip(reason='ResNet does not support input and output embeddings' )
    def __a ( self : Union[str, Any] ):
        pass

    def __a ( self : List[str] ):
        """Check the forward signature starts with ``pixel_values``."""
        lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ : Dict = model_class(lowerCamelCase )
            lowerCamelCase_ : Optional[int] = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase_ : List[str] = [*signature.parameters.keys()]
            lowerCamelCase_ : Tuple = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowerCamelCase )

    def __a ( self : Optional[Any] ):
        lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase )

    def __a ( self : List[Any] ):
        """Check hidden-state count and spatial shape for both layer types."""
        def check_hidden_states_output(lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : Tuple ):
            lowerCamelCase_ : Optional[Any] = model_class(lowerCamelCase )
            lowerCamelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
            lowerCamelCase_ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase_ : List[str] = self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ : Optional[int] = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                lowerCamelCase_ : str = layer_type
                lowerCamelCase_ : Optional[Any] = True
                check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                lowerCamelCase_ : str = True
                check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )

    def __a ( self : Union[str, Any] ):
        lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )

    @slow
    def __a ( self : List[str] ):
        """Smoke-test loading every published checkpoint."""
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ : Any = TFResNetModel.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
def _SCREAMING_SNAKE_CASE( ):
    """Load the standard COCO cats fixture image used by the integration test.

    NOTE(review): the local binding is restored to ``image``, the name the
    return statement already used.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
    """Integration test: run the pretrained TF ResNet classifier on the COCO
    fixture image and compare the logits against reference values.

    NOTE(review): the locals below were rebound to one throwaway name by
    obfuscation (processor / image / inputs / outputs); code left
    byte-identical.
    """
    @cached_property
    def __a ( self : str ):
        """Default image processor for the first published checkpoint."""
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def __a ( self : Union[str, Any] ):
        """End-to-end logits check against hard-coded reference values."""
        lowerCamelCase_ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        lowerCamelCase_ : Union[str, Any] = self.default_image_processor
        lowerCamelCase_ : Tuple = prepare_img()
        lowerCamelCase_ : str = image_processor(images=lowerCamelCase , return_tensors='tf' )
        # forward pass
        lowerCamelCase_ : Any = model(**lowerCamelCase )
        # verify the logits
        lowerCamelCase_ : Any = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , lowerCamelCase )
        lowerCamelCase_ : List[str] = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCamelCase , atol=1E-4 ) )
| 364 |
'''simple docstring'''
from __future__ import annotations
class UpperCamelCase__ :
    """N-order IIR (infinite impulse response) digital filter implementing the
    standard difference equation

        a[0]*y[n] = b[0]*x[n] + ... + b[k]*x[n-k] - a[1]*y[n-1] - ... - a[k]*y[n-k]

    NOTE(review): the obfuscated original defined both methods below under one
    name (the second shadowed the first, and its duplicated parameter names
    were a SyntaxError) and lost every ``self.*`` binding. Attribute names are
    restored from their readers; the method names follow the conventional
    set_coefficients/process API -- confirm the intended public names.
    """

    def __init__(self, order):
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs, b_coeffs):
        """Set the filter coefficients; ``a_coeffs`` may omit a_0 (defaults to 1.0).

        Raises ValueError when either list does not have ``order + 1`` entries.
        """
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs )}"
            )
            raise ValueError(error_msg )
        if len(b_coeffs ) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs )}"
            )
            raise ValueError(error_msg )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample):
        """Feed one input sample through the filter and return the output sample."""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift the histories and store the newest sample/output at index 0.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
| 679 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
# Module-level logger for this pipeline file.
__a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __UpperCAmelCase ( lowercase__ ):
    """Unconditional audio-generation diffusion pipeline (UNet + scheduler).

    NOTE(review): obfuscation damage -- both ``__init__`` and ``__call__``
    repeat the parameter name ``SCREAMING_SNAKE_CASE`` (a SyntaxError as
    written) and locals were rebound to ``UpperCamelCase`` while their uses
    keep the original names (``audio_length_in_s``, ``sample_size``,
    ``down_scale_factor``, ``audio``, ...). Code left byte-identical.
    """

    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
        """Register the UNet and scheduler modules on the pipeline."""
        super().__init__()
        self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )

    @torch.no_grad()
    def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 100 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , ) -> Dict:
        """Denoise random noise into audio of the requested length.

        Apparent parameters: batch_size, num_inference_steps, generator,
        audio_length_in_s, return_dict -- TODO confirm against the original
        pipeline.
        """
        if audio_length_in_s is None:
            UpperCamelCase = self.unet.config.sample_size / self.unet.config.sample_rate
        UpperCamelCase = audio_length_in_s * self.unet.config.sample_rate
        # Each up-block halves/doubles the resolution; the sample length must
        # be a multiple of this factor.
        UpperCamelCase = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
                f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
        UpperCamelCase = int(SCREAMING_SNAKE_CASE )
        if sample_size % down_scale_factor != 0:
            UpperCamelCase = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
                f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
                " process." )
        UpperCamelCase = int(SCREAMING_SNAKE_CASE )
        UpperCamelCase = next(iter(self.unet.parameters() ) ).dtype
        UpperCamelCase = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        UpperCamelCase = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=SCREAMING_SNAKE_CASE )
        # set step values
        self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE , device=audio.device )
        UpperCamelCase = self.scheduler.timesteps.to(SCREAMING_SNAKE_CASE )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            UpperCamelCase = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample
            # 2. compute previous image: x_t -> t_t-1
            UpperCamelCase = self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
        UpperCamelCase = audio.clamp(-1 , 1 ).float().cpu().numpy()
        UpperCamelCase = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=SCREAMING_SNAKE_CASE )
| 606 |
'''simple docstring'''
import argparse
from collections import defaultdict
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = F"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Tuple = f.readlines()
UpperCAmelCase : Tuple = F"class {class_name}("
UpperCAmelCase : str = F"{4 * ' '}def {test_name}("
UpperCAmelCase : Dict = F"{8 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Tuple = F"{16 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : List[str] = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Tuple = 0
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = []
for line in lines:
if line.startswith(__magic_name__ ):
UpperCAmelCase : int = True
elif in_class and line.startswith(__magic_name__ ):
UpperCAmelCase : Dict = True
elif in_class and in_func and (line.startswith(__magic_name__ ) or line.startswith(__magic_name__ )):
UpperCAmelCase : List[str] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"{spaces * ' '}{correct_line}" )
UpperCAmelCase : List[str] = False
else:
new_lines.append(__magic_name__ )
with open(__magic_name__ , "w" ) as f:
for line in new_lines:
f.write(__magic_name__ )
def main(correct_filename, fail_filename=None):
    """Apply every correction listed in `correct_filename` to the test files.

    Each line of `correct_filename` has the form
    ``file;class_name;test_name;correct_line``.  If `fail_filename` is given,
    only corrections whose ``file::class::test`` id appears in that file of
    failing tests are applied.
    """
    if fail_filename is not None:
        with open(fail_filename, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()

    # Counts how many occurrences of each test have already been patched.
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    # CLI entry point: read the expected results and patch the test files.
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
| 679 | 0 |
"""simple docstring"""
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Return the side length of the largest all-ones square in `mat`.

    Plain top-down recursion without memoization (exponential time); the best
    side length found so far is accumulated in a one-element list closure.
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE: outside the matrix, no square can extend here.
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # A square ending here extends the smallest of its three neighbors.
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Return the side length of the largest all-ones square in `mat`.

    Top-down recursion memoized through `dp_array` (-1 marks "not computed");
    the best side length found so far is accumulated in a one-element list.
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        # Memo hit: reuse the previously computed sub-problem solution.
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            # Zero cells are not cached (stay -1); they always resolve to 0.
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)

    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Return the side length of the largest all-ones square in `mat`.

    Bottom-up DP: ``dp_array[row][col]`` is the side of the largest square
    whose top-left corner is at (row, col).  Runs in O(rows * cols).
    """
    # One extra row/column of zeros avoids bounds checks on the borders.
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Return the side length of the largest all-ones square in `mat`.

    Space-optimized bottom-up DP keeping only two rows: `current_row` for the
    row being computed and `next_row` for the row below it.  O(cols) memory.
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy (not alias!) the finished row: aliasing would make `diagonal`
        # read the current row's freshly written values on the next pass,
        # e.g. returning 2 instead of 1 for [[1, 1], [1, 0]].
        next_row = current_row[:]

    return largest_square_area
if __name__ == "__main__":
    # Run the module doctests, then demo the bottom-up solver on a 2x2
    # all-ones matrix (prints 2: the whole matrix is the largest square).
    import doctest
    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 259 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    """Binary-tree node holding `data` coins and optional children.

    Forward references to ``TreeNode`` are legal here thanks to the module's
    ``from __future__ import annotations``.
    """

    # Number of coins stored at this node.
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
# Result of distributing coins over a subtree: total `moves` performed and the
# subtree's coin `excess` (coins held minus nodes).
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves to leave exactly one coin per node.

    A move transfers one coin along one edge.  The total number of coins in
    the tree must equal the number of nodes.

    Raises:
        ValueError: if the coin count does not match the node count.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            # An absent child costs 0 moves and reports an excess of 1, so the
            # parent sends it no coins (1 - excess == 0).
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        # Coins each child still needs from (or returns to) this node.
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 679 | 0 |
def a_(nums):
    """Return the mean absolute deviation of `nums`.

    That is, the average distance of the values from their arithmetic mean.

    Raises:
        ValueError: if `nums` is empty (the mean would be undefined).
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError('''List is empty''')
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 615 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

# Names referenced by the tokenizer class below as its class attributes.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

# Maximum sequence length each pretrained checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" LED tokenizer (backed by HuggingFace's *tokenizers* library), derived from a GPT-2-style
    byte-level BPE tokenizer.

    Args:
        vocab_file: Path to the vocabulary file.
        merges_file: Path to the merges file.
        errors: Paradigm to follow when decoding bytes to UTF-8.
        add_prefix_space: Whether to add an initial space to the input so the leading word is treated like any
            other word.
        trim_offsets: Whether the post-processing step should trim offsets to avoid including whitespace.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Sync the byte-level pre-tokenizer with the requested `add_prefix_space`.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self):
        """
        Mask token, to use when training a model with masked-language modeling. Logs an error if accessed while
        not having been set.
        """
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Reject pretokenized input unless the tokenizer was built with `add_prefix_space=True`."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Reject pretokenized input unless the tokenizer was built with `add_prefix_space=True`."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files into `save_directory`; return the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Build model input as `<s> A </s>` or `<s> A </s> </s> B </s>`."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_a + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """LED does not use token type ids; return a zero list of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad as usual, then pad `global_attention_mask` to the same length with -1 ("local attention")."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 679 | 0 |
def solution(n: int = 1000) -> int:
    """Project Euler 9 (generalized): Pythagorean triplet with a + b + c = n.

    Solves a**2 + b**2 = c**2 together with a + b + c = n by eliminating c,
    and returns the largest product a*b*c found (or -1 if no triplet exists).
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
    # Print the answer for the default perimeter when run as a script.
    print(f'''{solution() = }''')
| 242 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix=""):
    """Return a fresh, unique file path ending in `suffix` inside a new temp directory."""
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    """Tests for the `AgentAudio` agent type (tensor <-> .wav round-trips)."""

    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    """Tests for the `AgentImage` agent type (tensor / path / PIL round-trips)."""

    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        # Built from a path: to_string() should point at the very same file.
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        # Built from a PIL image: a new file is created, distinct from `path`.
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    """Tests for the `AgentText` agent type (a thin wrapper around `str`)."""

    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 679 | 0 |
from __future__ import annotations
from collections.abc import Callable
__A : Tuple = list[list[float | int]]
def __a ( A__ : Union[str, Any] , A__ : str ):
SCREAMING_SNAKE_CASE = len(A__ )
SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(A__ )]
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
for row in range(A__ ):
for col in range(A__ ):
SCREAMING_SNAKE_CASE = matrix[row][col]
SCREAMING_SNAKE_CASE = vector[row][0]
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
while row < size and col < size:
# pivoting
SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(A__ , A__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , A__ ):
SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col]
SCREAMING_SNAKE_CASE = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , A__ ):
for row in range(A__ ):
SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col]
for cola in range(A__ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(A__ )
]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Fit a polynomial through the points (1, y1), (2, y2), ..., (n, yn).

    Builds the Vandermonde system, solves it with `solve`, and returns a
    callable evaluating the fitted polynomial at an integer argument.
    """
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            # Row for point x = x_val + 1: powers x^(size-1), ..., x^0.
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        # Coefficients are rounded to integers: the problem guarantees an
        # integer-valued polynomial.
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """The degree-10 generating polynomial of Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Project Euler 101: sum of the first incorrect terms of the BOPs.

    For each truncation of the first `order` data points, interpolate an
    optimum polynomial and add its first value that disagrees with `func`.
    """
    data_points = [func(x_val) for x_val in range(1, order + 1)]

    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        # Walk forward until the fitted polynomial first diverges from func.
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret
if __name__ == "__main__":
    # Print the Project Euler 101 answer when run as a script.
    print(f'{solution() = }')
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """Convert a TF2 "token dropping" BERT checkpoint into a PyTorch `BertForMaskedLM`.

    Args:
        tf_checkpoint_path: Path of the TensorFlow checkpoint to read.
        config_path: Path of the BERT config JSON describing the architecture.
        pytorch_dump_path: Directory where the converted model is saved.
    """

    def get_masked_lm_array(name):
        # Masked-LM head weights live under the `masked_lm/` object scope.
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            # TF dense kernels are (in, out); PyTorch linear weights are (out, in).
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index, name):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index, name, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        # Attention weights are stored per-head; reshape to the HF flat layout.
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    # Word embeddings are tied to the masked-LM embedding table.
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done sucessfully!")
if __name__ == "__main__":
    # CLI wrapper around `convert_checkpoint_to_pytorch`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 679 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """The three prediction heads an MGP-STR output can be decoded with."""

    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"
# All decode formats the processor knows how to handle.
SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    r"""
    Constructs an MGP-STR processor wrapping an image processor and the MGP-STR tokenizers into a single processor.

    Args:
        image_processor: An instance of `ViTImageProcessor` (required).
        tokenizer: The character-level `MgpstrTokenizer` (required).
    """

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # `feature_extractor` is the deprecated alias of `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        self.char_tokenizer = tokenizer
        # Auxiliary tokenizers used to decode the BPE and WordPiece heads.
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('''gpt2''')
        self.wp_tokenizer = AutoTokenizer.from_pretrained('''bert-base-uncased''')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Forward `images` to the image processor and `text` to the character tokenizer."""
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''')

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        """
        Decode a (char_logits, bpe_logits, wp_logits) triple, keeping for every batch item the candidate string
        with the highest confidence score.  Returns a dict with `generated_text`, `scores` and the per-head
        predictions.
        """
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, '''char''')
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, '''bpe''')
        wp_strs, wp_scores = self._decode_helper(wp_preds, '''wp''')

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        """Greedy-decode one head's logits into strings plus cumulative-probability confidence scores."""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # char tokenizer end-of-sequence id
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f'Format {format} is not supported.')

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        # Greedy pick per position; drop the first position (start token).
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # Confidence = product of per-position max probabilities up to EOS.
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        """Decode with the character tokenizer, stripping the spaces it inserts between tokens."""
        decode_strs = [seq.replace(''' ''', '''''') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        """Decode with the GPT-2 BPE tokenizer."""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """Decode with the BERT WordPiece tokenizer, stripping inter-token spaces."""
        decode_strs = [seq.replace(''' ''', '''''') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 542 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
# Root of the package whose __init__ files are checked below.
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(R"^\s*try:")
# Catches a line with else:
_re_else = re.compile(R"^\s*else:")
def lowercase(__magic_name__):
    """Find the backend(s) checked in a code line of an init.

    Returns the sorted, "_and_"-joined backend names, or None if the line
    is not an ``if not is_xxx_available()`` check.
    """
    if _re_test_backend.search(__magic_name__) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(__magic_name__)]
    backends.sort()
    return "_and_".join(backends)
def lowercase(__magic_name__):
    """Parse a custom ``__init__.py`` and return its two halves.

    Returns a pair of dicts ``(import_dict_objects, type_hint_objects)`` mapping
    backend name (or "none") to the list of object names declared in the
    ``_import_structure`` half and the ``TYPE_CHECKING`` half respectively,
    or None for a traditional init without ``_import_structure``.
    """
    with open(__magic_name__, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def lowercase(import_dict_objects, type_hint_objects):
    """Compare the two halves of a custom init and return a list of error strings.

    Args:
        import_dict_objects: Mapping backend -> objects from ``_import_structure``.
        type_hint_objects: Mapping backend -> objects from the ``TYPE_CHECKING`` block.
    """

    def find_duplicates(seq):
        # Objects declared more than once in the same half.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    # Backends must appear in the same order on both sides.
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def lowercase():
    """Walk every ``__init__.py`` under PATH_TO_TRANSFORMERS and raise if any
    custom init declares different objects in its two halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def lowercase():
    """Return the list of top-level submodules of the package at PATH_TO_TRANSFORMERS.

    Includes package folders containing Python files and top-level ``.py``
    modules (excluding ``__init__.py`` and private ``_``-prefixed folders).
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules, not nested ones.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules deliberately not registered in the main init
# (conversion scripts and low-level interop utilities).
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def lowercase():
    """Check that every submodule is registered in the main init's
    ``_import_structure`` (ignoring IGNORE_SUBMODULES)."""
    # Load the repo's transformers package (not an installed one) so the
    # check runs against the working tree.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
check_submodules()
| 679 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
# Canonical file names the tokenizer reads/writes.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

# Download URLs for the pretrained checkpoint's tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum input lengths (positional embedding size) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word.

    Args:
        word: A sequence of symbols (e.g. a tuple of variable-length strings).

    Returns:
        Set of ``(prev_symbol, symbol)`` tuples.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Keep the module's previous name for this helper as an alias.
__lowerCAmelCase = get_pairs
class _UpperCamelCase(PreTrainedTokenizer):
    """
    Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding).

    Args:
        vocab_file (`str`): Path to a JSON file mapping tokens to ids.
        merges_file (`str`): Path to a BPE merges file (header line first, one merge per line).
        bos_token (`str`, defaults to `"__start__"`): Beginning-of-sequence token.
        eos_token (`str`, defaults to `"__end__"`): End-of-sequence token.
        unk_token (`str`, defaults to `"__unk__"`): Token for out-of-vocabulary symbols.
        pad_token (`str`, defaults to `"__null__"`): Padding token.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        # Reverse map for id -> token lookups.
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # Drop the "#version" header and trailing empty line.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        # Lower rank means the merge is applied earlier.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply Blenderbot-small normalization and BPE to a single token string."""
        if token in self.cache:
            return self.cache[token]

        # Blenderbot-small specific pre-tokenization normalization.
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            # Mark the end-of-word on the last symbol.
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # Merge the best-ranked known pair until none remains.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                word = tuple(new_word)
                if len(word) == 1:
                    break
                pairs = get_pairs(word)

            word = "@@ ".join(word)
            # Strip the trailing "</w>" marker.
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split text into BPE sub-word tokens."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a (lowercased) token to its vocabulary id, falling back to unk."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert a vocabulary id back to its token string."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join sub-word tokens, undoing the "@@ " continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary and merges files to ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
| 367 |
'''simple docstring'''
import os
def lowercase():
    """Solve Project Euler 18: maximum top-to-bottom path sum in ``triangle.txt``
    (expected next to this script)."""
    script_directory = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_directory, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # Dynamic programming: fold each row onto the best sum from the row above.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(lowercase())
| 679 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase__(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function.

    Args:
        num_diffusion_timesteps (int): Number of betas to produce.
        max_beta (float): Upper clamp on each beta to avoid singularities.
        alpha_transform_type (str): "cosine" (squaredcos_cap_v2) or "exp".

    Returns:
        torch.Tensor of shape (num_diffusion_timesteps,) with dtype float32.

    Raises:
        ValueError: For an unsupported ``alpha_transform_type``.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''')

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_t = 1 - alpha_bar(t+1)/alpha_bar(t), clamped at max_beta.
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


# Alias matching the name this helper is referenced by elsewhere in the module.
betas_for_alpha_bar = lowerCAmelCase__
class UpperCAmelCase_(SchedulerMixin, ConfigMixin):
    """
    KDPM2-style discrete scheduler (DPM-Solver-2 sampler with interpolated sigmas).

    Performs two internal stages per output step: a first-order stage at
    ``sigma_hat`` followed by a second-order correction at the interpolated sigma.

    NOTE(review): de-obfuscated restoration — local/attribute names recovered from
    in-body references; confirm against the upstream ``KDPM2DiscreteScheduler``.
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2  # two model evaluations per output step

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''')

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        """Map a timestep value to its index in the (possibly duplicated) schedule."""
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        """Standard deviation of the initial noise distribution."""
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        """Scale the input sample for the denoising model at the given timestep."""
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        """Precompute timesteps, sigmas and their interpolations for inference."""
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.''' )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def sigma_to_t(self, sigma):
        """Invert the sigma schedule: continuous timestep(s) for the given sigma(s)."""
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        """True while no first-stage sample has been stashed for the 2nd-order step."""
        return self.sample is None

    def step(self, model_output, timestep, sample, return_dict=True):
        """Advance the diffusion one (half-)step; returns SchedulerOutput or a tuple."""
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        """Diffuse clean samples to the noise levels of the given timesteps."""
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 568 |
'''simple docstring'''
def lowercase(__magic_name__):
    """Return the n-th Fibonacci number (with F(2) == 1, F(10) == 55).

    Returns 0 for n == 1 or any non-int input (legacy guard behavior).
    """
    if __magic_name__ == 1 or not isinstance(__magic_name__, int):
        return 0
    elif __magic_name__ == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, __magic_name__ + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[__magic_name__]
def lowercase(__magic_name__):
    """Return the index of the first Fibonacci number whose decimal
    representation has at least ``__magic_name__`` digits."""
    digits = 0
    index = 2
    while digits < __magic_name__:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def lowercase(__magic_name__ = 1000):
    """Project Euler 25 entry point: index of the first Fibonacci number
    containing ``__magic_name__`` digits (default 1000)."""
    result = fibonacci_digits_index(__magic_name__)
    return result


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 679 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _snake_case( unittest.TestCase ):
    # NOTE(review): throughout this class the benchmark flags (training=, inference=,
    # eager_mode=, multi_process=, ...) are passed the name `a`, which in most methods is
    # undefined — these look like automated renames of literal True/False values; confirm
    # against the upstream TensorFlow benchmark tests before relying on the flag values.
    def _UpperCamelCase (self : int , a : Optional[int] ) -> Optional[Any]:
        """Assert every (batch_size, sequence_length) cell of a benchmark result dict is populated."""
        # NOTE(review): iterates `results`, not the `a` parameter — presumably `a` was named `results`.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
                A__ = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(a )
    def _UpperCamelCase (self : List[str] ) -> int:
        """Run an eager-mode inference benchmark on a tiny GPT-2 checkpoint and check results are populated."""
        A__ = "sshleifer/tiny-gpt2"
        A__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
        A__ = TensorFlowBenchmark(a )
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def _UpperCamelCase (self : List[str] ) -> List[Any]:
        """Benchmark inference with `only_pretrain_model` on a tiny classification checkpoint."""
        A__ = "sgugger/tiny-distilbert-classification"
        A__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
        A__ = TensorFlowBenchmark(a )
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def _UpperCamelCase (self : Optional[int] ) -> int:
        """Benchmark inference on a tiny GPT-2 checkpoint without providing a config."""
        A__ = "sshleifer/tiny-gpt2"
        A__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
        A__ = TensorFlowBenchmark(a )
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def _UpperCamelCase (self : List[Any] ) -> List[Any]:
        """Benchmark eager-mode inference on a tiny GPT-2 checkpoint with an explicit config."""
        A__ = "sshleifer/tiny-gpt2"
        A__ = AutoConfig.from_pretrained(a )
        A__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
        A__ = TensorFlowBenchmark(a , [config] )
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def _UpperCamelCase (self : List[str] ) -> Optional[Any]:
        """Benchmark inference on a tiny GPT-2 checkpoint with an explicit config."""
        A__ = "sshleifer/tiny-gpt2"
        A__ = AutoConfig.from_pretrained(a )
        A__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
        A__ = TensorFlowBenchmark(a , [config] )
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def _UpperCamelCase (self : Union[str, Any] ) -> int:
        """Benchmark training on a tiny GPT-2 checkpoint and check train time/memory results."""
        A__ = "sshleifer/tiny-gpt2"
        A__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
        A__ = TensorFlowBenchmark(a )
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def _UpperCamelCase (self : str ) -> Optional[int]:
        """Benchmark training on a tiny GPT-2 checkpoint with an explicit config."""
        A__ = "sshleifer/tiny-gpt2"
        A__ = AutoConfig.from_pretrained(a )
        A__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
        A__ = TensorFlowBenchmark(a , [config] )
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def _UpperCamelCase (self : int ) -> Optional[int]:
        """Benchmark inference on a tiny T5 (encoder-decoder) checkpoint with an explicit config."""
        A__ = "patrickvonplaten/t5-tiny-random"
        A__ = AutoConfig.from_pretrained(a )
        A__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
        A__ = TensorFlowBenchmark(a , configs=[config] )
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
    def _UpperCamelCase (self : Tuple ) -> Optional[Any]:
        """Benchmark XLA-compiled inference on a tiny GPT-2 checkpoint (GPU only)."""
        A__ = "sshleifer/tiny-gpt2"
        A__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
        A__ = TensorFlowBenchmark(a )
        A__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def _UpperCamelCase (self : Any ) -> str:
        """Check that benchmarking with save_to_csv writes the time/memory/env CSV files."""
        A__ = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            A__ = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
            A__ = TensorFlowBenchmark(a )
            benchmark.run()
            self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
    def _UpperCamelCase (self : Union[str, Any] ) -> Tuple:
        """Check that line-by-line memory tracing produces a populated summary and a log file."""
        A__ = "sshleifer/tiny-gpt2"
        def _check_summary_is_not_empty(a : int ):
            # The memory summary object is expected to expose these four sections.
            self.assertTrue(hasattr(a , 'sequential' ) )
            self.assertTrue(hasattr(a , 'cumulative' ) )
            self.assertTrue(hasattr(a , 'current' ) )
            self.assertTrue(hasattr(a , 'total' ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            A__ = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
            A__ = TensorFlowBenchmark(a )
            A__ = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
| 531 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Number of hidden layers for each published RWKV checkpoint size.
# NOTE(review): both constants below are named `a`, so the second assignment shadows the
# first — upstream these were NUM_HIDDEN_LAYERS_MAPPING and HIDEN_SIZE_MAPPING (used later).
a : List[str] = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}
# Hidden dimension for each published RWKV checkpoint size.
a : Dict = {
    "169M": 7_68,
    "430M": 10_24,
    "1B5": 20_48,
    "3B": 25_60,
    "7B": 40_96,
    "14B": 51_20,
}
def lowercase ( __magic_name__ ):
    """Rename the keys of a raw RWKV checkpoint ``state_dict`` to the HF Transformers layout.

    Mutates and returns the same dict. Renames: ``emb.`` -> ``embeddings.``,
    ``blocks.0.ln0`` -> ``blocks.0.pre_ln``, ``att``/``ffn`` -> ``attention``/``feed_forward``,
    ``time_mix_{k,v,r}`` -> ``time_mix_{key,value,receptance}``, and prefixes everything
    except ``head.weight`` with ``rwkv.``.

    Bug fixed: the original popped each weight into a throwaway variable, referenced an
    undefined ``weight`` name, and never stored the tensor back under the renamed key.
    """
    state_dict = __magic_name__
    # Snapshot the keys: we mutate the dict while iterating.
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith("emb." ):
            name = name.replace("emb." , "embeddings." )
        # ln0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0" ):
            name = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
        # att -> attention
        name = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , name )
        # ffn -> feed_forward
        name = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , name )
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k" ):
            name = name.replace(".time_mix_k" , ".time_mix_key" )
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v" ):
            name = name.replace(".time_mix_v" , ".time_mix_value" )
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r" ):
            name = name.replace(".time_mix_r" , ".time_mix_receptance" )
        # Everything except the LM head lives under the `rwkv.` base-model prefix.
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=False , __magic_name__=None ):
    """Convert a raw RWKV checkpoint from the Hub into HF Transformers format.

    Steps: (1) build/save the tokenizer, (2) build/save the config from the inferred or
    given size, (3) download the checkpoint and rename its keys, (4) shard and save the
    weights, (5) reload each shard on CPU, and optionally push model + tokenizer to the Hub.

    NOTE(review): the signature repeats ``__magic_name__`` seven times (a SyntaxError) and
    the body reads names (``tokenizer_file``, ``size``, ``checkpoint_file``, ``tokenizer``,
    ``config``, ``state_dict``, ``index``, ...) that are never bound — an automated rename
    destroyed the original parameter/local names; restore them before running this script.
    """
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer." )
        # Default GPT-NeoX vocab size used by the released RWKV checkpoints.
        UpperCAmelCase : List[str] = 5_0277
        UpperCAmelCase : str = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
    else:
        UpperCAmelCase : List[Any] = PreTrainedTokenizerFast(tokenizer_file=__magic_name__ )
        UpperCAmelCase : List[Any] = len(__magic_name__ )
    tokenizer.save_pretrained(__magic_name__ )
    # 2. Build the config
    UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                UpperCAmelCase : Union[str, Any] = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
    if size not in possible_sizes:
        raise ValueError(F"`size` should be one of {possible_sizes}, got {size}." )
    UpperCAmelCase : str = RwkvConfig(
        vocab_size=__magic_name__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(__magic_name__ )
    # 3. Download model file then convert state_dict
    UpperCAmelCase : Union[str, Any] = hf_hub_download(__magic_name__ , __magic_name__ )
    UpperCAmelCase : Optional[Any] = torch.load(__magic_name__ , map_location="cpu" )
    UpperCAmelCase : Union[str, Any] = convert_state_dict(__magic_name__ )
    # 4. Split in shards and save
    UpperCAmelCase , UpperCAmelCase : Any = shard_checkpoint(__magic_name__ )
    for shard_file, shard in shards.items():
        torch.save(__magic_name__ , os.path.join(__magic_name__ , __magic_name__ ) )
    if index is not None:
        UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ )
        # Save the index as well
        with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
            UpperCAmelCase : List[Any] = json.dumps(__magic_name__ , indent=2 , sort_keys=__magic_name__ ) + "\n"
            f.write(__magic_name__ )
    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." )
    UpperCAmelCase : Any = list(shards.keys() )
    # Free the full state dict before re-loading shards to keep peak memory down.
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        UpperCAmelCase : Dict = torch.load(os.path.join(__magic_name__ , __magic_name__ ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__magic_name__ , __magic_name__ ) )
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub." )
        UpperCAmelCase : int = AutoModelForCausalLM.from_pretrained(__magic_name__ )
        model.push_to_hub(__magic_name__ , max_shard_size="2GB" )
        tokenizer.push_to_hub(__magic_name__ )
if __name__ == "__main__":
    # Command-line entry point for the RWKV -> HF conversion script.
    a : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )
    # NOTE(review): the parser is bound to `a` but used as `parser`, and
    # `convert_rmkv_checkpoint_to_hf_format` is not defined in this file (the conversion
    # function above was renamed to `lowercase`) — automated renaming broke this entry point.
    a : Dict = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 679 | 0 |
import numpy as np
def _lowerCamelCase ( lowerCamelCase_: Dict ):
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def _lowerCamelCase ( lowerCamelCase_: str ):
'''simple docstring'''
return vector * sigmoid(lowerCamelCase_ )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
'''simple docstring'''
def lowercase ( a , b ):
    """Return the bitwise AND of two non-negative integers as a binary string ("0b...").

    Both numbers are rendered in binary, zero-filled to the wider width, and combined
    digit-by-digit.

    Raises:
        ValueError: if either input is negative.

    Bug fixed: the original signature declared the parameter ``__magic_name__`` twice
    (a SyntaxError) while the body used ``a`` and ``b``; the parameters are restored.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 679 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
_lowercase : str =logging.getLogger(__name__)
@dataclass
class UpperCamelCase_ :
    """Arguments identifying which pretrained model/config/tokenizer to fine-tune.

    NOTE(review): every field below is named ``_a`` (so later annotations shadow earlier
    ones) and the default sentinel ``lowercase__`` is undefined — upstream these were
    distinct fields (model_name_or_path, config_name, tokenizer_name, cache_dir,
    use_fast_tokenizer, model_revision, use_auth_token) with None/bool defaults; restore
    them before use.
    """
    _a : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    _a : Optional[str] = field(
        default=lowercase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    _a : Optional[str] = field(
        default=lowercase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    _a : Optional[str] = field(
        default=lowercase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    _a : bool = field(
        default=lowercase__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    _a : str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    _a : bool = field(
        default=lowercase__ , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
@dataclass
class UpperCamelCase_ :
    """Arguments controlling the training/evaluation data and preprocessing.

    NOTE(review): as in the model-arguments class, all fields are named ``_a`` and the
    default sentinel ``lowercase__`` is undefined; upstream these were train_file,
    validation_file, overwrite_cache, preprocessing_num_workers, max_seq_length,
    pad_to_max_length, max_train_samples, max_eval_samples.
    """
    _a : Optional[str] = field(default=lowercase__ , metadata={'help': 'The input training data file (a text file).'} )
    _a : Optional[str] = field(
        default=lowercase__ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
    _a : bool = field(
        default=lowercase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    _a : Optional[int] = field(
        default=lowercase__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    _a : Optional[int] = field(
        default=lowercase__ , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. If passed, sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    _a : bool = field(
        default=lowercase__ , metadata={
            'help': (
                'Whether to pad all samples to the maximum sentence length. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
                'efficient on GPU but very bad for TPU.'
            )
        } , )
    _a : Optional[int] = field(
        default=lowercase__ , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    _a : Optional[int] = field(
        default=lowercase__ , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    def __a ( self : List[Any] ):
        """Validate that the provided train/validation files are csv or json."""
        if self.train_file is not None:
            # NOTE(review): the extension is bound to `lowerCamelCase_` but asserted as
            # `extension` — another broken automated rename.
            lowerCamelCase_ : Optional[Any] = self.train_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            lowerCamelCase_ : Tuple = self.validation_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCamelCase_ :
    """Data collator that flattens and dynamically pads multiple-choice features.

    Each incoming feature holds ``num_choices`` candidate sequences; the collator
    flattens them, pads with the tokenizer, reshapes back to
    ``(batch_size, num_choices, seq_len)``, and re-attaches the labels.

    Bugs fixed: the four dataclass fields were all named ``_a`` (shadowing each other)
    while ``__call__`` reads ``self.tokenizer``/``self.padding``/``self.max_length``/
    ``self.pad_to_multiple_of`` — the field names are restored; ``torch.intaa`` (not a
    real dtype) is corrected to ``torch.int64``.
    """

    # Tokenizer whose `pad` method performs the dynamic padding.
    tokenizer: PreTrainedTokenizerBase
    # Padding strategy forwarded to `tokenizer.pad`.
    padding: Union[bool, str, PaddingStrategy] = True
    # Optional maximum sequence length for padding.
    max_length: Optional[int] = None
    # If set, pad to a multiple of this value (useful for fp16 tensor cores).
    pad_to_multiple_of: Optional[int] = None

    def __call__( self , lowerCamelCase : Any ):
        features = lowerCamelCase
        # Accept either "label" or "labels" and pull it out before padding.
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]['input_ids'] )
        # Flatten: one entry per (example, choice) pair.
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
        # Un-flatten back to (batch_size, num_choices, seq_len)
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels , dtype=torch.int64 )
        return batch
def _SCREAMING_SNAKE_CASE ( ):
    """Main entry point of the SWAG multiple-choice fine-tuning example.

    Parses arguments, configures logging, loads dataset/config/tokenizer/model,
    preprocesses the multiple-choice examples, then trains/evaluates with the HF
    Trainer and optionally pushes the result to the Hub.

    NOTE(review): every local is bound to the repeated name ``lowerCamelCase_`` while the
    body reads the upstream names (``parser``, ``model_args``, ``data_args``,
    ``training_args``, ``raw_datasets``, ``tokenizer``, ``model``, ...), and call
    arguments use the undefined ``lowerCAmelCase__`` — an automated rename destroyed the
    original variable names; restore them (cf. transformers/examples run_swag.py) before
    running.
    """
    lowerCamelCase_ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowerCamelCase_ : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowerCamelCase_ : List[str] = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag' ,lowerCAmelCase__ ,lowerCAmelCase__ )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    lowerCamelCase_ : int = training_args.get_process_log_level()
    logger.setLevel(lowerCAmelCase__ )
    datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
    transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
    logger.info(F"Training/evaluation parameters {training_args}" )
    # Detecting last checkpoint.
    lowerCamelCase_ : Tuple = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        lowerCamelCase_ : List[str] = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"Output directory ({training_args.output_dir}) already exists and is not empty. "
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        lowerCamelCase_ : Optional[int] = {}
        if data_args.train_file is not None:
            lowerCamelCase_ : Dict = data_args.train_file
        if data_args.validation_file is not None:
            lowerCamelCase_ : Optional[Any] = data_args.validation_file
        lowerCamelCase_ : Any = data_args.train_file.split('.' )[-1]
        lowerCamelCase_ : Union[str, Any] = load_dataset(
            lowerCAmelCase__ ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
    else:
        # Downloading and loading the swag dataset from the hub.
        lowerCamelCase_ : Optional[Any] = load_dataset(
            'swag' ,'regular' ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowerCamelCase_ : List[Any] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    lowerCamelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    lowerCamelCase_ : List[Any] = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path ,from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    lowerCamelCase_ : Optional[Any] = [F"ending{i}" for i in range(4 )]
    lowerCamelCase_ : int = "sent1"
    lowerCamelCase_ : int = "sent2"
    if data_args.max_seq_length is None:
        lowerCamelCase_ : Tuple = tokenizer.model_max_length
        if max_seq_length > 10_24:
            logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
                ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
                ' override this default with `--block_size xxx`.' )
            lowerCamelCase_ : List[str] = 10_24
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
        lowerCamelCase_ : Dict = min(data_args.max_seq_length ,tokenizer.model_max_length )
    # Preprocessing the datasets.
    def preprocess_function(lowerCAmelCase__ ):
        # Repeat each context once per candidate ending, pair with the endings,
        # tokenize the flat list, then group back into blocks of 4 choices.
        lowerCamelCase_ : str = [[context] * 4 for context in examples[context_name]]
        lowerCamelCase_ : Optional[int] = examples[question_header_name]
        lowerCamelCase_ : List[Any] = [
            [F"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(lowerCAmelCase__ )
        ]
        # Flatten out
        lowerCamelCase_ : Any = list(chain(*lowerCAmelCase__ ) )
        lowerCamelCase_ : Optional[Any] = list(chain(*lowerCAmelCase__ ) )
        # Tokenize
        lowerCamelCase_ : str = tokenizer(
            lowerCAmelCase__ ,lowerCAmelCase__ ,truncation=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding='max_length' if data_args.pad_to_max_length else False ,)
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 ,len(lowerCAmelCase__ ) ,4 )] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset' )
        lowerCamelCase_ : Union[str, Any] = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            lowerCamelCase_ : Tuple = min(len(lowerCAmelCase__ ) ,data_args.max_train_samples )
            lowerCamelCase_ : Dict = train_dataset.select(range(lowerCAmelCase__ ) )
        with training_args.main_process_first(desc='train dataset map pre-processing' ):
            lowerCamelCase_ : List[Any] = train_dataset.map(
                lowerCAmelCase__ ,batched=lowerCAmelCase__ ,num_proc=data_args.preprocessing_num_workers ,load_from_cache_file=not data_args.overwrite_cache ,)
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset' )
        lowerCamelCase_ : Any = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            lowerCamelCase_ : str = min(len(lowerCAmelCase__ ) ,data_args.max_eval_samples )
            lowerCamelCase_ : Tuple = eval_dataset.select(range(lowerCAmelCase__ ) )
        with training_args.main_process_first(desc='validation dataset map pre-processing' ):
            lowerCamelCase_ : Optional[Any] = eval_dataset.map(
                lowerCAmelCase__ ,batched=lowerCAmelCase__ ,num_proc=data_args.preprocessing_num_workers ,load_from_cache_file=not data_args.overwrite_cache ,)
    # Data collator
    lowerCamelCase_ : Any = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=lowerCAmelCase__ ,pad_to_multiple_of=8 if training_args.fpaa else None )
    )
    # Metric
    def compute_metrics(lowerCAmelCase__ ):
        # Accuracy over the argmax of the per-choice logits.
        lowerCamelCase_ : Dict = eval_predictions
        lowerCamelCase_ : Dict = np.argmax(lowerCAmelCase__ ,axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
    # Initialize our Trainer
    lowerCamelCase_ : Tuple = Trainer(
        model=lowerCAmelCase__ ,args=lowerCAmelCase__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,tokenizer=lowerCAmelCase__ ,data_collator=lowerCAmelCase__ ,compute_metrics=lowerCAmelCase__ ,)
    # Training
    if training_args.do_train:
        lowerCamelCase_ : Tuple = None
        if training_args.resume_from_checkpoint is not None:
            lowerCamelCase_ : List[Any] = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            lowerCamelCase_ : str = last_checkpoint
        lowerCamelCase_ : Optional[int] = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        lowerCamelCase_ : Tuple = train_result.metrics
        lowerCamelCase_ : Any = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
        )
        lowerCamelCase_ : Any = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
        trainer.log_metrics('train' ,lowerCAmelCase__ )
        trainer.save_metrics('train' ,lowerCAmelCase__ )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        lowerCamelCase_ : List[Any] = trainer.evaluate()
        lowerCamelCase_ : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
        lowerCamelCase_ : Union[str, Any] = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
        trainer.log_metrics('eval' ,lowerCAmelCase__ )
        trainer.save_metrics('eval' ,lowerCAmelCase__ )
    lowerCamelCase_ : Optional[Any] = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**lowerCAmelCase__ )
    else:
        trainer.create_model_card(**lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
    # NOTE(review): this def shadows the main function defined above (same renamed name)
    # and calls an undefined `main` — upstream this was `_mp_fn(index)`, the TPU
    # (xla_spawn) entry point that simply delegated to `main()`.
    main()
if __name__ == "__main__":
    # NOTE(review): `main` is undefined here; the actual entry point above was renamed
    # to `_SCREAMING_SNAKE_CASE` by the same automated rename.
    main()
| 364 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Select the tensor framework string for tokenizer `return_tensors`, preferring
# PyTorch, then TensorFlow, then JAX.  NOTE(review): bound to `a` but read below as
# `FRAMEWORK` — automated rename; restore the constant name.
if is_torch_available():
    a : Optional[Any] = "pt"
elif is_tf_available():
    a : List[Any] = "tf"
else:
    a : List[Any] = "jax"
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = PerceiverTokenizer
SCREAMING_SNAKE_CASE__ : List[str] = False
    def A_ ( self ):
        '''Create a fresh Perceiver tokenizer and persist it to the temporary test directory.'''
        super().setUp()
        # NOTE(review): the tokenizer is bound to `UpperCAmelCase` but saved as `tokenizer`
        # — broken automated rename.
        UpperCAmelCase : List[str] = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def A_ ( self ):
        '''Return the reference pretrained Perceiver tokenizer (cached per test instance).'''
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
    def A_ ( self , **snake_case ):
        '''Load a tokenizer from the temporary directory created in setUp, forwarding kwargs.'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
    def A_ ( self , snake_case , snake_case=False , snake_case=2_0 , snake_case=5 ):
        '''Build a clean (text, ids) pair of decodable ASCII tokens for round-trip tests.

        NOTE(review): the signature repeats `snake_case` four times (a SyntaxError) —
        upstream the parameters were (tokenizer, with_prefix_space=False, max_length=20,
        min_length=5).
        '''
        UpperCAmelCase : Optional[Any] = []
        for i in range(len(snake_case ) ):
            try:
                UpperCAmelCase : int = tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        # Keep only tokens made of spaces/letters that round-trip through encode().
        UpperCAmelCase : Optional[int] = list(filter(lambda snake_case : re.match(r"^[ a-zA-Z]+$" , t[1] ) , snake_case ) )
        UpperCAmelCase : Any = list(filter(lambda snake_case : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=snake_case ) , snake_case ) )
        if max_length is not None and len(snake_case ) > max_length:
            UpperCAmelCase : Optional[Any] = toks[:max_length]
        if min_length is not None and len(snake_case ) < min_length and len(snake_case ) > 0:
            while len(snake_case ) < min_length:
                UpperCAmelCase : Any = toks + toks
        # toks_str = [t[1] for t in toks]
        UpperCAmelCase : Dict = [t[0] for t in toks]
        # Ensure consistency
        UpperCAmelCase : Any = tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case )
        if " " not in output_txt and len(snake_case ) > 1:
            UpperCAmelCase : Dict = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case )
            )
        if with_prefix_space:
            UpperCAmelCase : Union[str, Any] = " " + output_txt
        UpperCAmelCase : Dict = tokenizer.encode(snake_case , add_special_tokens=snake_case )
        return output_txt, output_ids
    def A_ ( self ):
        '''Check byte-level encode/decode of multi-byte (non-ASCII) characters round-trips.'''
        UpperCAmelCase : Union[str, Any] = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = "Unicode €."
        UpperCAmelCase : int = tokenizer(snake_case )
        UpperCAmelCase : Tuple = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Optional[Any] = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]Unicode €.[SEP]" )
        UpperCAmelCase : Tuple = tokenizer("e è é ê ë" )
        UpperCAmelCase : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Dict = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]e è é ê ë[SEP]" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
    def A_ ( self ):
        '''Check that batch tokenization pads to the expected shape and exact ids.'''
        UpperCAmelCase : int = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        UpperCAmelCase : List[str] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
        # fmt: on
        UpperCAmelCase : Dict = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        self.assertIsInstance(snake_case , snake_case )
        if FRAMEWORK != "jax":
            UpperCAmelCase : List[Any] = list(batch.input_ids.numpy()[0] )
        else:
            UpperCAmelCase : str = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(snake_case , snake_case )
        self.assertEqual((2, 3_8) , batch.input_ids.shape )
        self.assertEqual((2, 3_8) , batch.attention_mask.shape )
    def A_ ( self ):
        '''Check the tokenizer output contains encoder inputs only (no decoder_* keys).'''
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        UpperCAmelCase : List[Any] = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , snake_case )
        self.assertIn("attention_mask" , snake_case )
        self.assertNotIn("decoder_input_ids" , snake_case )
        self.assertNotIn("decoder_attention_mask" , snake_case )
def A_ ( self ):
    """Target texts tokenized with padding='max_length' must yield width-32 input_ids.

    NOTE(review): identifiers are machine-mangled; `targets` is read but never bound.
    """
    UpperCAmelCase : Tuple = self.perceiver_tokenizer
    UpperCAmelCase : int = [
        "Summary of the text.",
        "Another summary.",
    ]
    UpperCAmelCase : List[Any] = tokenizer(
        text_target=snake_case , max_length=3_2 , padding="max_length" , truncation=snake_case , return_tensors=snake_case )
    # Second dimension of the padded target ids must equal max_length.
    self.assertEqual(3_2 , targets["input_ids"].shape[1] )
def A_ ( self ):
    """Round-trip save_pretrained/from_pretrained, including added tokens and
    special tokens, and check model_max_length survives and can be overridden.

    NOTE(review): identifiers are machine-mangled (`UpperCAmelCase` locals,
    `snake_case` placeholders); loop targets such as `tokenizers` and names like
    `after_tokenizer` are read but never bound -- recover names from upstream.
    """
    UpperCAmelCase : Any = self.get_tokenizers()
    # Sanity check: default model_max_length is not the sentinel 42.
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}" ):
            self.assertNotEqual(tokenizer.model_max_length , 4_2 )
    # Now let's start the test
    UpperCAmelCase : Tuple = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}" ):
            # Isolate this from the other tests because we save additional tokens/etc
            UpperCAmelCase : Dict = tempfile.mkdtemp()
            UpperCAmelCase : Any = " He is very happy, UNwant\u00E9d,running"
            UpperCAmelCase : int = tokenizer.encode(snake_case , add_special_tokens=snake_case )
            tokenizer.save_pretrained(snake_case )
            UpperCAmelCase : List[str] = tokenizer.__class__.from_pretrained(snake_case )
            # Reloaded tokenizer must reproduce the exact same encoding.
            UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
            self.assertListEqual(snake_case , snake_case )
            shutil.rmtree(snake_case )
    UpperCAmelCase : Dict = self.get_tokenizers(model_max_length=4_2 )
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}" ):
            # Isolate this from the other tests because we save additional tokens/etc
            UpperCAmelCase : str = tempfile.mkdtemp()
            UpperCAmelCase : int = " He is very happy, UNwant\u00E9d,running"
            tokenizer.add_tokens(["bim", "bambam"] )
            UpperCAmelCase : int = tokenizer.additional_special_tokens
            additional_special_tokens.append("new_additional_special_token" )
            tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
            UpperCAmelCase : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
            tokenizer.save_pretrained(snake_case )
            UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(snake_case )
            UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
            self.assertListEqual(snake_case , snake_case )
            # Added special token and the custom model_max_length must persist...
            self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
            self.assertEqual(after_tokenizer.model_max_length , 4_2 )
            # ...and from_pretrained kwargs must still be able to override it.
            UpperCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(snake_case , model_max_length=4_3 )
            self.assertEqual(tokenizer.model_max_length , 4_3 )
            shutil.rmtree(snake_case )
def A_ ( self ):
    """Verify additional_special_tokens declared in special_tokens_map.json /
    tokenizer_config.json are honored by from_pretrained and can be overridden
    via the additional_special_tokens kwarg.

    NOTE(review): identifiers are machine-mangled; `tokenizer_list`,
    `added_tokens_extra_ids`, `tokenizer_without_change_in_init` and `tokenizer`
    are read but never bound under those names -- recover from upstream.
    """
    UpperCAmelCase : Dict = []
    if self.test_slow_tokenizer:
        tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
    if self.test_rust_tokenizer:
        tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
    for tokenizer_class, tokenizer_utils in tokenizer_list:
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer_utils.save_pretrained(snake_case )
            with open(os.path.join(snake_case , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                UpperCAmelCase : Union[str, Any] = json.load(snake_case )
            with open(os.path.join(snake_case , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                UpperCAmelCase : Any = json.load(snake_case )
            # Rewrite both config files with an extra custom special token.
            UpperCAmelCase : str = [f"<extra_id_{i}>" for i in range(1_2_5 )]
            UpperCAmelCase : List[Any] = added_tokens_extra_ids + [
                "an_additional_special_token"
            ]
            UpperCAmelCase : List[str] = added_tokens_extra_ids + [
                "an_additional_special_token"
            ]
            with open(os.path.join(snake_case , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                json.dump(snake_case , snake_case )
            with open(os.path.join(snake_case , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                json.dump(snake_case , snake_case )
            # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
            # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
            # "special_tokens_map.json" files
            UpperCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(
                snake_case , )
            self.assertIn(
                "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
            self.assertEqual(
                ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                    tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
            # Now we test that we can change the value of additional_special_tokens in the from_pretrained
            UpperCAmelCase : Optional[int] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case )]
            UpperCAmelCase : Optional[int] = tokenizer_class.from_pretrained(
                snake_case , additional_special_tokens=snake_case , )
            self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
            self.assertEqual(
                ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
                    tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def A_ ( self ):
    """Decoding the out-of-range byte id 178 must yield the Unicode replacement char.

    NOTE(review): `tokenizer` is read but the assignment target is a mangled local.
    """
    UpperCAmelCase : int = self.perceiver_tokenizer
    self.assertEqual(tokenizer.decode([1_7_8] ) , "�" )
def A_ ( self ):
    """Overridden as a no-op to skip a common tokenizer test (which one is not
    visible in this chunk -- presumably a vocab-file-based test that does not
    apply to a byte-level tokenizer; TODO confirm against upstream)."""
    pass

def A_ ( self ):
    """Overridden as a no-op; see note on the first skipped test above -- TODO confirm."""
    pass

def A_ ( self ):
    """Overridden as a no-op; see note on the first skipped test above -- TODO confirm."""
    pass

def A_ ( self ):
    """Overridden as a no-op; see note on the first skipped test above -- TODO confirm."""
    pass
def A_ ( self ):
    """convert_tokens_to_string on a list of character tokens must return a plain str.

    NOTE(review): identifiers are machine-mangled; `tokenizers` is read but never
    bound under that name.
    """
    UpperCAmelCase : Dict = self.get_tokenizers(fast=snake_case , do_lower_case=snake_case )
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}" ):
            UpperCAmelCase : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
            UpperCAmelCase : int = tokenizer.convert_tokens_to_string(snake_case )
            self.assertIsInstance(snake_case , snake_case )
| 679 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
    """Fast check that FlaxDiffusionPipeline.from_pretrained downloads only Flax files.

    NOTE(review): identifiers are machine-mangled (`UpperCamelCase` locals,
    `SCREAMING_SNAKE_CASE` placeholders); `all_root_files` and `files` are read
    but never bound under those names -- recover from upstream.
    """

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """Downloading a Flax pipeline must not fetch any PyTorch (.bin) weight files."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            UpperCamelCase = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE )
            # Collect every file name below the cached snapshot directory.
            UpperCamelCase = [t[-1] for t in os.walk(os.path.join(SCREAMING_SNAKE_CASE , os.listdir(SCREAMING_SNAKE_CASE )[0] , "snapshots" ) )]
            UpperCamelCase = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
    """Slow end-to-end generation tests for FlaxStableDiffusionPipeline.

    Each test shards the prompt and PRNG keys across all JAX devices and, when
    exactly 8 devices are present, compares hard-coded pixel-slice and image
    sums against reference values.

    NOTE(review): identifiers are machine-mangled (`UpperCamelCase` locals,
    `SCREAMING_SNAKE_CASE` placeholders); names such as `pipeline`, `prompt`,
    `num_samples`, `images`, `scheduler`, `slice`/`slice_eff` are read but never
    bound under those names, `jnp.bfloataa`/`np.floataa` look like mangled
    `bfloat16`/`float32`, and the `Optional`/`Tuple` return hints rely on typing
    imports not visible in this chunk -- recover all of this from upstream.
    """

    def __lowerCAmelCase ( self ) -> Optional[int]:
        """Tiny test pipeline, 4 steps: check 64x64 output shape, reference sums
        (on 8 devices), and numpy_to_pil conversion count."""
        UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=SCREAMING_SNAKE_CASE )
        UpperCamelCase = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCamelCase = jax.random.PRNGKey(0 )
        UpperCamelCase = 4
        UpperCamelCase = jax.device_count()
        UpperCamelCase = num_samples * [prompt]
        UpperCamelCase = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE )
        # shard inputs and rng
        UpperCamelCase = replicate(SCREAMING_SNAKE_CASE )
        UpperCamelCase = jax.random.split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        UpperCamelCase = shard(SCREAMING_SNAKE_CASE )
        UpperCamelCase = pipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , jit=SCREAMING_SNAKE_CASE ).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
            assert np.abs(np.abs(SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1
        UpperCamelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
        assert len(SCREAMING_SNAKE_CASE ) == num_samples

    def __lowerCAmelCase ( self ) -> List[str]:
        """SD v1-4, 'flax' revision (fp32), 50 steps: 512x512 shape + reference sums."""
        UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=SCREAMING_SNAKE_CASE )
        UpperCamelCase = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCamelCase = jax.random.PRNGKey(0 )
        UpperCamelCase = 50
        UpperCamelCase = jax.device_count()
        UpperCamelCase = num_samples * [prompt]
        UpperCamelCase = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE )
        # shard inputs and rng
        UpperCamelCase = replicate(SCREAMING_SNAKE_CASE )
        UpperCamelCase = jax.random.split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        UpperCamelCase = shard(SCREAMING_SNAKE_CASE )
        UpperCamelCase = pipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , jit=SCREAMING_SNAKE_CASE ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
            assert np.abs((np.abs(SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1

    def __lowerCAmelCase ( self ) -> Tuple:
        """SD v1-4, 'bf16' revision with explicit safety_checker arg: same checks in bf16."""
        UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=SCREAMING_SNAKE_CASE )
        UpperCamelCase = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCamelCase = jax.random.PRNGKey(0 )
        UpperCamelCase = 50
        UpperCamelCase = jax.device_count()
        UpperCamelCase = num_samples * [prompt]
        UpperCamelCase = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE )
        # shard inputs and rng
        UpperCamelCase = replicate(SCREAMING_SNAKE_CASE )
        UpperCamelCase = jax.random.split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        UpperCamelCase = shard(SCREAMING_SNAKE_CASE )
        UpperCamelCase = pipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , jit=SCREAMING_SNAKE_CASE ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
            assert np.abs((np.abs(SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1

    def __lowerCAmelCase ( self ) -> Tuple:
        """Same bf16 run but with the default safety checker: identical reference sums."""
        UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
        UpperCamelCase = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCamelCase = jax.random.PRNGKey(0 )
        UpperCamelCase = 50
        UpperCamelCase = jax.device_count()
        UpperCamelCase = num_samples * [prompt]
        UpperCamelCase = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE )
        # shard inputs and rng
        UpperCamelCase = replicate(SCREAMING_SNAKE_CASE )
        UpperCamelCase = jax.random.split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        UpperCamelCase = shard(SCREAMING_SNAKE_CASE )
        UpperCamelCase = pipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , jit=SCREAMING_SNAKE_CASE ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
            assert np.abs((np.abs(SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1

    def __lowerCAmelCase ( self ) -> List[str]:
        """bf16 run with a custom FlaxDDIMScheduler (scaled_linear betas, steps_offset=1)."""
        UpperCamelCase = FlaxDDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=SCREAMING_SNAKE_CASE , steps_offset=1 , )
        UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , )
        UpperCamelCase = scheduler.create_state()
        UpperCamelCase = scheduler_state
        UpperCamelCase = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCamelCase = jax.random.PRNGKey(0 )
        UpperCamelCase = 50
        UpperCamelCase = jax.device_count()
        UpperCamelCase = num_samples * [prompt]
        UpperCamelCase = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE )
        # shard inputs and rng
        UpperCamelCase = replicate(SCREAMING_SNAKE_CASE )
        UpperCamelCase = jax.random.split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        UpperCamelCase = shard(SCREAMING_SNAKE_CASE )
        UpperCamelCase = pipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , jit=SCREAMING_SNAKE_CASE ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
            assert np.abs((np.abs(SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1

    def __lowerCAmelCase ( self ) -> str:
        """Memory-efficient attention must produce (near-)identical images to default attention."""
        UpperCamelCase = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        UpperCamelCase = jax.device_count()
        UpperCamelCase = num_samples * [prompt]
        UpperCamelCase = jax.random.split(jax.random.PRNGKey(0 ) , SCREAMING_SNAKE_CASE )
        UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=SCREAMING_SNAKE_CASE , )
        UpperCamelCase = replicate(SCREAMING_SNAKE_CASE )
        UpperCamelCase = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE )
        UpperCamelCase = shard(SCREAMING_SNAKE_CASE )
        UpperCamelCase = pipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , jit=SCREAMING_SNAKE_CASE ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        UpperCamelCase = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=SCREAMING_SNAKE_CASE , use_memory_efficient_attention=SCREAMING_SNAKE_CASE , )
        UpperCamelCase = replicate(SCREAMING_SNAKE_CASE )
        UpperCamelCase = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE )
        UpperCamelCase = shard(SCREAMING_SNAKE_CASE )
        UpperCamelCase = pipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , jit=SCREAMING_SNAKE_CASE ).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        UpperCamelCase = images[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice ).max() < 1e-2
| 606 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
a : Tuple = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL for pretrained EfficientFormer models.
# NOTE(review): both module constants are mangled to the same name `a`, so this
# assignment shadows the logger above -- recover the original names from upstream.
a : str = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class UpperCamelCase__ ( lowercase__ ):
    """Configuration class for the EfficientFormer model.

    Fixes the obfuscated original, which declared every ``__init__`` parameter
    as ``snake_case`` (duplicate argument names are a SyntaxError) and bound
    each value to a throwaway annotated local instead of an attribute on
    ``self``. Parameter names/order are recovered from the default values,
    which match them one-to-one.

    NOTE(review): the base class name ``lowercase__`` (presumably
    ``PretrainedConfig``) and the attribute ``num_metaad_blocks`` (upstream
    spells it ``num_meta3d_blocks``) are kept as found -- confirm against
    upstream before renaming.
    """

    # model_type identifier used by the auto-config machinery.
    SCREAMING_SNAKE_CASE__ : Tuple = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [4_8, 9_6, 2_2_4, 4_4_8],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 4_4_8,
        key_dim: int = 3_2,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 1_6,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_metaad_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 2_2_4,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ):
        """Store every architecture hyper-parameter on the config instance."""
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_metaad_blocks = num_metaad_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 679 | 0 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a msgpack-serialized Flax checkpoint file into a PyTorch model.

    De-obfuscation fix: the original declared both parameters with the same
    mangled name (a SyntaxError) while its body already referenced
    ``model_file`` and ``load_flax_weights_in_pytorch_model``, which pins down
    the intended names.

    Args:
        pt_model: the PyTorch model whose state dict will be populated.
        model_file: path to the serialized Flax state.

    Returns:
        The PyTorch model with weights loaded from the Flax checkpoint.

    Raises:
        OSError: if the file is a git-lfs pointer (repo cloned without git-lfs).
        EnvironmentError: if the file cannot be deserialized as Flax state.
    """
    try:
        with open(model_file, "rb") as flax_state_f:
            # NOTE(review): upstream passes None as the from_bytes target; the
            # obfuscated original obscured this argument -- confirm upstream.
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            # A readable text file starting with "version" is a git-lfs pointer,
            # not actual weights.
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Copy an in-memory Flax parameter tree into a PyTorch model.

    De-obfuscation fix: the original declared both parameters with the same
    mangled name (a SyntaxError), used broken lambdas whose parameter name did
    not match the body (``lambda _lowerCamelCase: x...``), and bound every
    local to a throwaway name while reading it under its real name. Names are
    restored from those read sites (``flax_state_dict``, ``pt_model_dict``,
    ``missing_keys``, ``unexpected_keys``, ...).

    Args:
        pt_model: the PyTorch model to populate (modified in place).
        flax_state: nested dict of Flax parameters.

    Returns:
        The same PyTorch model with converted weights loaded.

    Raises:
        ImportError: if PyTorch is not installed.
        ValueError: if a converted tensor's shape mismatches the PyTorch weight.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bfaa = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bfaa):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        # Conv kernels are HWIO in Flax, OIHW in PyTorch; dense kernels are transposed.
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        # Flax numbers sub-modules with "_N" suffixes; PyTorch uses ".N" indices.
        # time_embedding modules are excluded from this renaming.
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
| 259 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
    """Builds ResNetConfig objects and dummy image batches for the TF ResNet tests.

    NOTE(review): identifiers are machine-mangled -- ``__init__`` declares every
    parameter as ``snake_case`` (duplicate argument names are a SyntaxError) and
    values are bound to throwaway annotated locals instead of ``self.<attr>``;
    recover the original names from upstream.
    """

    def __init__( self , snake_case , snake_case=3 , snake_case=3_2 , snake_case=3 , snake_case=1_0 , snake_case=[1_0, 2_0, 3_0, 4_0] , snake_case=[1, 1, 2, 1] , snake_case=True , snake_case=True , snake_case="relu" , snake_case=3 , snake_case=None , ):
        """Store the test hyper-parameters on the tester instance."""
        UpperCAmelCase : Dict = parent
        UpperCAmelCase : int = batch_size
        UpperCAmelCase : Union[str, Any] = image_size
        UpperCAmelCase : Union[str, Any] = num_channels
        UpperCAmelCase : List[str] = embeddings_size
        UpperCAmelCase : Any = hidden_sizes
        UpperCAmelCase : int = depths
        UpperCAmelCase : List[str] = is_training
        UpperCAmelCase : List[str] = use_labels
        UpperCAmelCase : int = hidden_act
        UpperCAmelCase : Union[str, Any] = num_labels
        UpperCAmelCase : str = scope
        UpperCAmelCase : str = len(snake_case )

    def A_ ( self ):
        """Create (config, pixel_values, labels) for a test forward pass."""
        UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase : List[Any] = None
        if self.use_labels:
            UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels )
        UpperCAmelCase : Optional[int] = self.get_config()
        return config, pixel_values, labels

    def A_ ( self ):
        """Build a ResNetConfig from the stored hyper-parameters."""
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def A_ ( self , snake_case , snake_case , snake_case ):
        """Run TFResNetModel and check the last hidden state shape (B, C, H//32, W//32)."""
        UpperCAmelCase : List[Any] = TFResNetModel(config=snake_case )
        UpperCAmelCase : int = model(snake_case )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )

    def A_ ( self , snake_case , snake_case , snake_case ):
        """Run the classification head and check logits shape (B, num_labels)."""
        UpperCAmelCase : List[str] = self.num_labels
        UpperCAmelCase : List[Any] = TFResNetForImageClassification(snake_case )
        UpperCAmelCase : Union[str, Any] = model(snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def A_ ( self ):
        """Adapt prepare_config_and_inputs to the (config, inputs_dict) shape the common tests use."""
        UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = config_and_inputs
        UpperCAmelCase : Union[str, Any] = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
    """Model-level tests for TFResNetModel / TFResNetForImageClassification.

    NOTE(review): identifiers are machine-mangled -- all class attributes are
    named ``SCREAMING_SNAKE_CASE__`` (each shadowing the last) and both mixin
    base classes are mangled to ``lowercase__``; recover names from upstream.
    """

    SCREAMING_SNAKE_CASE__ : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    SCREAMING_SNAKE_CASE__ : Optional[int] = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : int = False
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : Optional[Any] = False
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = False

    def A_ ( self ):
        """Set up the model tester and the config tester."""
        UpperCAmelCase : Dict = TFResNetModelTester(self )
        UpperCAmelCase : List[Any] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case )

    def A_ ( self ):
        """Run the common ResNetConfig serialization / round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def A_ ( self ):
        """No-op placeholder (common-property checks handled by the config tester)."""
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds" )
    def A_ ( self ):
        """Skipped: ResNet is a vision model without inputs_embeds."""
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings" )
    def A_ ( self ):
        """Skipped: ResNet has no token embedding layers."""
        pass

    def A_ ( self ):
        """The forward signature of every model class must start with `pixel_values`."""
        UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase : Dict = model_class(snake_case )
            UpperCAmelCase : Optional[int] = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase : List[str] = [*signature.parameters.keys()]
            UpperCAmelCase : Tuple = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , snake_case )

    def A_ ( self ):
        """Smoke-test the base model forward pass."""
        UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )

    def A_ ( self ):
        """hidden_states must have num_stages+1 entries with the expected spatial
        dims, for both 'basic' and 'bottleneck' layer types, whether requested
        via kwargs or via the config."""
        def check_hidden_states_output(snake_case , snake_case , snake_case ):
            UpperCAmelCase : Optional[Any] = model_class(snake_case )
            UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
            UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCAmelCase : List[str] = self.model_tester.num_stages
            self.assertEqual(len(snake_case ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase : Optional[int] = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                UpperCAmelCase : str = layer_type
                UpperCAmelCase : Optional[Any] = True
                check_hidden_states_output(snake_case , snake_case , snake_case )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                UpperCAmelCase : str = True
                check_hidden_states_output(snake_case , snake_case , snake_case )

    def A_ ( self ):
        """Smoke-test the image-classification head."""
        UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case )

    @slow
    def A_ ( self ):
        """Pretrained checkpoints must load without error."""
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase : Any = TFResNetModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )
def lowercase ( ):
    """Load the COCO cats fixture image used by the integration tests below.

    Fix: the obfuscated original bound the opened image to a throwaway mangled
    local and then returned the undefined name ``image`` (a NameError at call
    time); bind and return the same object.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
    """Integration test: run the pretrained TF ResNet classifier on the COCO cats image.

    NOTE(review): identifiers are machine-mangled; `model`, `image_processor`,
    `image`, `inputs`, `outputs`, `expected_shape`, `expected_slice` are read but
    never bound under those names -- recover from upstream.
    """

    @cached_property
    def A_ ( self ):
        """Default image processor for the first pretrained checkpoint (None without vision)."""
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def A_ ( self ):
        """Logits must have shape (1, 1000) and the first three values must match
        the hard-coded reference slice to 1e-4."""
        UpperCAmelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        UpperCAmelCase : Union[str, Any] = self.default_image_processor
        UpperCAmelCase : Tuple = prepare_img()
        UpperCAmelCase : str = image_processor(images=snake_case , return_tensors="tf" )
        # forward pass
        UpperCAmelCase : Any = model(**snake_case )
        # verify the logits
        UpperCAmelCase : Any = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , snake_case )
        UpperCAmelCase : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case , atol=1e-4 ) )
| 679 | 0 |
def a_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if exponent == 1:
return base
if exponent % 2 == 0:
lowerCAmelCase__ = _modexpt(__lowerCAmelCase , exponent // 2 , __lowerCAmelCase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__lowerCAmelCase , exponent - 1 , __lowerCAmelCase )) % modulo_value
def a_ ( __lowerCAmelCase = 17_77 , __lowerCAmelCase = 18_55 , __lowerCAmelCase = 8 ):
lowerCAmelCase__ = base
for _ in range(1 , __lowerCAmelCase ):
lowerCAmelCase__ = _modexpt(__lowerCAmelCase , __lowerCAmelCase , 10**digits )
return result
if __name__ == "__main__":
print(F"{solution() = }")
| 615 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class UpperCamelCase__:
    """Builds tiny MPNet configs/inputs and runs per-head shape checks for the test class below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # Parameter names restored from the attribute assignments they feed.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        # NOTE(review): method name restored by HF tester convention — confirm.
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        """Build a random batch of inputs plus a small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand each example to `num_choices` copies, as the MC head expects.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        # NOTE(review): name restored from the ModelTesterMixin contract — confirm.
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


# Restore the name the test class below uses (`MPNetModelTester(self)` at setUp).
MPNetModelTester = UpperCamelCase__
@require_torch
class UpperCamelCase__(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite wiring for the MPNet model family."""

    # NOTE(review): attribute names restored from the ModelTesterMixin /
    # PipelineTesterMixin contracts — confirm against the mixins.
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class UpperCamelCase__(unittest.TestCase):
    """Integration check for MPNet against the published `microsoft/mpnet-base` weights."""

    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 679 | 0 |
def UpperCamelCase_(graph, next_ver, curr_ind, path):
    """Return True if `next_ver` can extend the partial Hamiltonian `path` at `curr_ind`.

    The candidate must be adjacent to the last placed vertex and not already used.
    (Parameter names restored: the original signature duplicated one name four times.)
    """
    # 1. Validate that current and next vertices are adjacent
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


# Restore the helper name the recursive search below calls.
valid_connection = UpperCamelCase_
def UpperCamelCase_(graph, path, curr_ind):
    """Backtracking helper: try to complete `path` into a Hamiltonian cycle.

    `path` has len(graph)+1 slots with path[0] == path[-1] == start vertex; slots
    are filled in place from index `curr_ind`. Returns True iff a cycle exists.
    """
    # Base case: every vertex placed — a cycle exists iff last and first are adjacent.
    if curr_ind == len(graph):
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        # Inlined validity check: next vertex must be adjacent and not already used.
        if graph[path[curr_ind - 1]][next_ver] != 0 and all(vertex != next_ver for vertex in path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


# Bind the recursion name to *this* function object so the later redefinition of
# `UpperCamelCase_` (the public entry point) cannot shadow it.
util_hamilton_cycle = UpperCamelCase_
def UpperCamelCase_(graph, start_index=0):
    """Return a Hamiltonian cycle of `graph` starting at `start_index`, or [] if none.

    The result lists len(graph)+1 vertices with the start repeated at the end.
    """
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate; if we find an answer return the path, otherwise an empty list
    return path if util_hamilton_cycle(graph, path, 1) else []
| 242 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
a : Optional[Any] = logging.get_logger(__name__)
a : List[str] = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def lowercase(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Download slow tokenizer checkpoints and save them as fast `tokenizer.json` files.

    Args (order grounded by the __main__ call site):
        tokenizer_name: tokenizer class to convert, or None for all known classes.
        checkpoint_name: single checkpoint to convert, or None for all canonical ones.
        dump_path: output directory for the generated fast tokenizer files.
        force_download: re-download checkpoints instead of using the cache.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(F"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            # legacy_format=False keeps only the fast-tokenizer serialization — TODO confirm
            # against the original conversion script.
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(F"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(F"=> removing {file_name}")


# Restore the entry-point name used by the __main__ block below.
convert_slow_checkpoint_to_fast = lowercase
if __name__ == "__main__":
    # CLI wrapper: the original bound the parser/args to a throwaway name `a`
    # while the following lines read `parser`/`args`, raising NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()
    # `convert_slow_checkpoint_to_fast` is the converter defined above.
    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 679 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__A : int = logging.get_logger(__name__)
def __a(A__):
    """Load a metaseq OPT state dict from the checkpoint file at `A__` and normalize it.

    Drops unused weights, renames keys to the HF layout, and splits fused
    `qkv_proj` weights into separate q/k/v projections.
    """
    sd = torch.load(A__, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(A__, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


# Restore the name used by the conversion entry point below.
load_checkpoint = __a
@torch.no_grad()
def __a(fairseq_path, pytorch_dump_folder_path, config=None):
    """Convert a metaseq/fairseq OPT checkpoint into a saved HF `OPTModel`.

    Args (keyword `config` grounded by the __main__ call site):
        fairseq_path: path to the fairseq checkpoint file.
        pytorch_dump_folder_path: output directory (created if missing).
        config: optional path/name of an `OPTConfig` to load; defaults to `OPTConfig()`.
    """
    state_dict = load_checkpoint(fairseq_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


# Restore the entry-point name used by the __main__ block below.
convert_opt_checkpoint = __a
if __name__ == "__main__":
    # CLI wrapper: the original bound the parser/args to `__A` while the following
    # lines read `parser`/`args`, raising NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    # `convert_opt_checkpoint` is the conversion function defined above.
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCamelCase__(PipelineTool):
    """Visual-question-answering tool: answers an English question about an image.

    Base class restored to `PipelineTool` (imported above); class-attribute and
    method names restored from the `PipelineTool` encode/forward/decode contract.
    """

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, question):
        """Tokenize/process the (image, question) pair into model inputs."""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        """Run the VQA model without tracking gradients and return the logits."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the argmax logit to its label string (was `idalabel`: AttributeError)."""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 679 | 0 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class __lowercase(ProcessorMixin):
    """Processor pairing an `EncodecFeatureExtractor` with a T5 tokenizer.

    Base class restored to `ProcessorMixin` (imported above); attribute and method
    names restored from the ProcessorMixin contract (the original duplicated the
    names `UpperCAmelCase_`/`snake_case`, which shadow each other).
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Default to audio processing; __call__ can switch targets via a context manager.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Dispatch `audio` to the feature extractor and `text` to the tokenizer.

        Returns the tokenizer output, the feature-extractor output, or a merged
        dict when both inputs are given.
        """
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # First positional argument is treated as the audio input.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Decode audio values via `_decode_audio`, otherwise fall back to the tokenizer."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        """Strip padding from a batch of generated audio arrays.

        Returns a list of per-example arrays reshaped to (channels, -1).
        """
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
| 542 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a : Optional[int] = logging.get_logger(__name__)
def lowercase(__magic_name__):
    """Rewrite `name.N` segments in a PyTorch key as `name_N` (Flax layer naming).

    e.g. ``"encoder.layers.0.weight"`` -> ``"encoder.layers_0.weight"``.
    """
    regex = r"\w+[.]\d+"
    # was `re.findall(__magic_name__, __magic_name__)`: the pattern local went unused.
    pats = re.findall(regex, __magic_name__)
    key = __magic_name__
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


# Restore the name used by the state-dict converter below.
rename_key = lowercase
def lowercase(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key tuple to its Flax equivalent, reshaping if needed.

    Returns ``(flax_key_tuple, tensor)``; conv kernels are transposed to HWIO and
    linear weights to (in, out). (Parameter names restored: the original signature
    duplicated one name three times.)
    """
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


# Restore the name used by the state-dict converter below.
rename_key_and_reshape_tensor = lowercase
def lowercase(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict to a nested Flax param dict for `flax_model`.

    Keys are renamed/reshaped via `rename_key` / `rename_key_and_reshape_tensor`
    and validated against the shapes of randomly-initialized Flax params.
    (Parameter names restored: the original signature duplicated one name.)
    """
    # Step 1: Convert pytorch tensors to numpy.
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)


# Presumably the public conversion entry point — alias kept for external callers.
convert_pytorch_state_dict_to_flax = lowercase
| 679 | 0 |
from PIL import Image
def __lowerCAmelCase(img, level):
    """Return a copy of `img` with every channel value shifted by `level`.

    Raises ValueError unless -255.0 <= level <= 255.0. (Parameter names restored:
    the original signature duplicated one name, and `img.point` was handed the
    wrong object instead of the mapping function.)
    """
    def brightness(c):
        # Shift each channel value by `level` (written around the 128 mid-point).
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


# Restore the name used by the __main__ block below.
change_brightness = __lowerCAmelCase
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100. `__lowerCAmelCase` is the change_brightness
        # helper defined above; the original bound the result to a throwaway
        # name while saving via the undefined `brigt_img`.
        brigt_img = __lowerCAmelCase(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
| 367 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__(SchedulerCommonTest):
    """Unit and full-loop regression tests for `EulerDiscreteScheduler`.

    Base class restored to `SchedulerCommonTest` (imported above); attribute and
    method names restored from the SchedulerCommonTest contract (the original
    duplicated the method name `A_`, so only one survived at runtime).
    """

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Run the full denoising loop and compare against recorded reference stats."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 679 | 0 |
def lowerCAmelCase__(_a):
    """Return one of four hard-coded demo adjacency lists, selected by index `_a`.

    (Was ``][index]``: `index` is undefined — the parameter `_a` is the selector.)
    """
    return [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ][_a]
def lowerCAmelCase__ ( _a ):
    """Return the list of bridges of the undirected graph ``_a``.

    ``_a`` is an adjacency dict; vertices are assumed to be numbered 0..n-1
    (they index the ``low``/``visited`` arrays). Each bridge is returned as a
    tuple ordered (small, large). Uses the classic low-link DFS.

    Bugs fixed: the original bound every local to the placeholder name
    ``snake_case_`` while reading ``n``/``low``/``visited``/``bridges``, and the
    inner ``dfs`` declared the parameter ``_a`` three times (a SyntaxError).
    """
    id_ = 0
    n = len(_a )  # number of vertices in the graph
    low = [0] * n  # low-link value per vertex
    visited = [False] * n

    def dfs(at, parent, bridges, id_ ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in _a[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_ )
                low[at] = min(low[at], low[to] )
                # the edge (at, to) is a bridge iff no back edge from the
                # subtree of `to` reaches `at` or above
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to] )

    bridges = []
    for i in range(n ):
        if not visited[i]:
            dfs(i, -1, bridges, id_ )
    return bridges
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
import doctest
doctest.testmod()
| 568 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCamelCase__ ( lowercase__ ):
    """Static hygiene checks over dataset scripts under ``./datasets``:
    every ``open(...)`` must pass an explicit encoding (or a binary mode) and
    no ``print(...)`` statements are allowed.

    Bugs fixed: the obfuscated original named all four methods ``A_`` (each
    definition shadowing the previous), bound results to the placeholder
    ``UpperCAmelCase`` while reading undefined names (``regexp``, ``match``,
    ``matches``), searched the file *path* instead of its contents, and called
    helper methods (``_no_encoding_on_file_open`` / ``_no_print_statements``)
    that did not exist under those names.
    """

    def _no_encoding_on_file_open(self, snake_case ):
        """Return a regex match if file ``snake_case`` contains an ``open()``
        call without an explicit encoding or binary/write mode, else None."""
        with open(snake_case , encoding="utf-8" ) as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match

    def _no_print_statements(self, snake_case ):
        """Return the first real ``print(`` match in file ``snake_case``,
        ignoring prints inside comments, strings and docstrings."""
        with open(snake_case , encoding="utf-8" ) as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self ):
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}" )

    def test_no_print_statements(self ):
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 679 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level tokenizer resources: logger, the SentencePiece word-boundary
# marker, expected vocab/spm file names, pretrained checkpoint URL maps,
# max model input sizes, and the MuST-C target-language set.
# NOTE(review): all constants share the garbled name `lowerCAmelCase_`, so each
# assignment shadows the previous one; the class below reads the original
# names (VOCAB_FILES_NAMES, MAX_MODEL_INPUT_SIZES, LANGUAGES, ...).
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = "▁"
lowerCAmelCase_ = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
lowerCAmelCase_ = {
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
lowerCAmelCase_ = {
"facebook/s2t-small-librispeech-asr": 1_0_2_4,
}
lowerCAmelCase_ = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
lowerCAmelCase_ = {"mustc": MUSTC_LANGS}
# NOTE(review): obfuscated dump of a Speech2Text-style SentencePiece tokenizer.
# Attributes and locals are bound to the placeholder `A__` while later code
# reads the intended names (`self.sp_model_kwargs`, `self.encoder`,
# `self.do_upper_case`, ...), so most methods cannot run as written; the
# bindings must be restored before use. Structure/contract notes below.
class _snake_case( lowercase__ ):
# class-level tokenizer configuration (file-name map, URL map, max sizes)
__snake_case: Union[str, Any] = VOCAB_FILES_NAMES
__snake_case: Tuple = PRETRAINED_VOCAB_FILES_MAP
__snake_case: int = MAX_MODEL_INPUT_SIZES
__snake_case: List[Any] = ["input_ids", "attention_mask"]
__snake_case: List[int] = []
# Loads the JSON vocab and the SentencePiece model; when `lang_codes` is
# given, registers `<lang:xx>` tokens and selects the target language.
def __init__(self : int , a : Tuple , a : Optional[int] , a : Optional[int]="<s>" , a : int="</s>" , a : Tuple="<pad>" , a : Dict="<unk>" , a : List[Any]=False , a : int=False , a : Dict=None , a : Dict=None , a : int = None , **a : str , ) -> List[Any]:
"""simple docstring"""
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a , eos_token=a , unk_token=a , pad_token=a , do_upper_case=a , do_lower_case=a , tgt_lang=a , lang_codes=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
A__ = do_upper_case
A__ = do_lower_case
A__ = load_json(a )
A__ = {v: k for k, v in self.encoder.items()}
A__ = spm_file
A__ = load_spm(a , self.sp_model_kwargs )
if lang_codes is not None:
A__ = lang_codes
A__ = LANGUAGES[lang_codes]
A__ = [f"""<lang:{lang}>""" for lang in self.langs]
A__ = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs}
A__ = self.lang_tokens
A__ = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
A__ = {}
# vocab_size property
@property
def _UpperCamelCase (self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return len(self.encoder )
# tgt_lang property with setter that refreshes the special prefix tokens
@property
def _UpperCamelCase (self : Dict ) -> Tuple:
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def _UpperCamelCase (self : List[str] , a : Dict ) -> Tuple:
"""simple docstring"""
A__ = new_tgt_lang
self.set_tgt_lang_special_tokens(a )
# prefix the encoded sequence with the target-language code id
def _UpperCamelCase (self : int , a : Optional[int] ) -> Tuple:
"""simple docstring"""
A__ = self.lang_code_to_id[tgt_lang]
A__ = [lang_code_id]
# tokenize / token<->id conversion via the SentencePiece model and JSON vocab
def _UpperCamelCase (self : int , a : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.encode(a , out_type=a )
def _UpperCamelCase (self : Tuple , a : Tuple ) -> List[Any]:
"""simple docstring"""
return self.encoder.get(a , self.encoder[self.unk_token] )
def _UpperCamelCase (self : Union[str, Any] , a : Dict ) -> Optional[Any]:
"""simple docstring"""
return self.decoder.get(a , self.unk_token )
# detokenize: decode sub-token runs with SentencePiece, keeping special
# tokens verbatim; optionally upper-case the decoded text
def _UpperCamelCase (self : str , a : Optional[Any] ) -> List[Any]:
"""simple docstring"""
A__ = []
A__ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
A__ = self.sp_model.decode(a )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
A__ = []
else:
current_sub_tokens.append(a )
A__ = self.sp_model.decode(a )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
# build inputs: lang-code prefix + ids + </s>
def _UpperCamelCase (self : Optional[Any] , a : Optional[int] , a : Union[str, Any]=None ) -> List[str]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
# special-tokens mask: 1 for prefix tokens and the trailing eos, 0 elsewhere
def _UpperCamelCase (self : Optional[int] , a : List[str] , a : Tuple = None , a : Optional[int] = False ) -> Optional[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
A__ = [1] * len(self.prefix_tokens )
A__ = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(a )) + suffix_ones
return prefix_ones + ([0] * len(a )) + ([0] * len(a )) + suffix_ones
def _UpperCamelCase (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
A__ = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
# pickling support: the SentencePiece processor is not picklable, so it is
# dropped on __getstate__ and reloaded from spm_file on __setstate__
def __getstate__(self : Optional[int] ) -> Any:
"""simple docstring"""
A__ = self.__dict__.copy()
A__ = None
return state
def __setstate__(self : Tuple , a : str ) -> Optional[int]:
"""simple docstring"""
A__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
A__ = {}
A__ = load_spm(self.spm_file , self.sp_model_kwargs )
# save the JSON vocab and copy (or re-serialize) the SentencePiece model
def _UpperCamelCase (self : Dict , a : List[Any] , a : int = None ) -> Union[str, Any]:
"""simple docstring"""
A__ = Path(a )
assert save_dir.is_dir(), f"""{save_directory} should be a directory"""
A__ = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
A__ = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , a )
if os.path.abspath(self.spm_file ) != os.path.abspath(a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , a )
elif not os.path.isfile(self.spm_file ):
with open(a , 'wb' ) as fi:
A__ = self.sp_model.serialized_model_proto()
fi.write(a )
return (str(a ), str(a ))
def _A ( path ,sp_model_kwargs ):
    """Load a SentencePiece model from ``path``.

    ``sp_model_kwargs`` are forwarded to the ``SentencePieceProcessor``
    constructor. Returns the loaded processor.

    Bugs fixed: the original declared the parameter ``UpperCAmelCase`` twice
    (a SyntaxError) and bound the processor to ``A__`` while calling
    ``spm.Load`` on an undefined name and returning ``spm``.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def _A ( UpperCAmelCase ):
    """Load and return the JSON content of the file at path ``UpperCAmelCase``.

    Bug fixed: the original called ``json.load`` on the path string instead of
    the opened file object. Also opens with an explicit utf-8 encoding.
    """
    with open(UpperCAmelCase ,'r' ,encoding='utf-8' ) as f:
        return json.load(f )
def _A ( data ,path ):
    """Serialize ``data`` as pretty-printed JSON at ``path``.

    Argument order matches the call site ``save_json(self.encoder, vocab_file)``.

    Bugs fixed: the original declared the parameter ``UpperCAmelCase`` twice
    (a SyntaxError) and passed the same placeholder to ``json.dump`` for both
    the object and the file handle. Also writes with an explicit utf-8 encoding.
    """
    with open(path ,'w' ,encoding='utf-8' ) as f:
        json.dump(data ,f ,indent=2 )
| 531 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : str = logging.getLogger(__name__)
class UpperCamelCase__ ( lowercase__ ):
    """BertEncoder variant that can run a single transformer layer at a time,
    used by the PABEE early-exit model below."""

    def adaptive_forward(self , hidden_states , current_layer , attention_mask=None , head_mask=None ):
        """Run only layer ``current_layer`` on ``hidden_states`` and return its
        output hidden states.

        Bugs fixed: the obfuscated original declared ``snake_case`` twice in the
        signature (a SyntaxError), bound the layer output to ``UpperCAmelCase``
        while reading undefined names, and was named ``A_`` although the model
        below calls ``self.encoder.adaptive_forward(...)``.
        """
        layer_outputs = self.layer[current_layer](hidden_states , attention_mask , head_mask[current_layer] )
        hidden_states = layer_outputs[0]
        return hidden_states
# NOTE(review): obfuscated dump of a BERT-with-PABEE (patience-based early
# exit) model. Locals and attributes are bound to the placeholder
# `UpperCAmelCase` while later statements read the intended names
# (`self.patience`, `embedding_output`, `encoder_outputs`, `res`, ...), and
# several signatures repeat the parameter name `snake_case`; the bindings and
# parameter names must be restored before this can run. Intent is noted below.
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
super().__init__(snake_case )
UpperCAmelCase : Dict = BertEncoderWithPabee(snake_case )
self.init_weights()
# early-exit bookkeeping: patience counter, exit threshold, and running
# totals used to report the average number of layers executed at inference
UpperCAmelCase : int = 0
UpperCAmelCase : Dict = 0
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : List[Any] = 0
def A_ ( self , snake_case ):
'''simple docstring'''
# regression threshold setter
UpperCAmelCase : List[Any] = threshold
def A_ ( self , snake_case ):
'''simple docstring'''
# patience setter
UpperCAmelCase : str = patience
def A_ ( self ):
'''simple docstring'''
# reset inference statistics
UpperCAmelCase : Dict = 0
UpperCAmelCase : List[Any] = 0
def A_ ( self ):
'''simple docstring'''
# log the average number of executed layers and the implied speed-up
UpperCAmelCase : Dict = self.inference_layers_num / self.inference_instances_num
UpperCAmelCase : List[Any] = (
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(snake_case )
@add_start_docstrings_to_model_forward(snake_case )
def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=False , ):
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
UpperCAmelCase : Dict = input_ids.size()
elif inputs_embeds is not None:
UpperCAmelCase : Any = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
UpperCAmelCase : Optional[int] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
UpperCAmelCase : Tuple = torch.ones(snake_case , device=snake_case )
if token_type_ids is None:
UpperCAmelCase : List[Any] = torch.zeros(snake_case , dtype=torch.long , device=snake_case )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
UpperCAmelCase : torch.Tensor = self.get_extended_attention_mask(snake_case , snake_case , snake_case )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = encoder_hidden_states.size()
UpperCAmelCase : List[str] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
UpperCAmelCase : int = torch.ones(snake_case , device=snake_case )
UpperCAmelCase : str = self.invert_attention_mask(snake_case )
else:
UpperCAmelCase : int = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
UpperCAmelCase : Dict = self.get_head_mask(snake_case , self.config.num_hidden_layers )
UpperCAmelCase : Tuple = self.embeddings(
input_ids=snake_case , position_ids=snake_case , token_type_ids=snake_case , inputs_embeds=snake_case )
UpperCAmelCase : int = embedding_output
# Training: run every layer and collect one classifier output per layer.
if self.training:
UpperCAmelCase : int = []
for i in range(self.config.num_hidden_layers ):
UpperCAmelCase : List[Any] = self.encoder.adaptive_forward(
snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
UpperCAmelCase : Dict = self.pooler(snake_case )
UpperCAmelCase : List[Any] = output_layers[i](output_dropout(snake_case ) )
res.append(snake_case )
elif self.patience == 0: # Use all layers for inference
UpperCAmelCase : Union[str, Any] = self.encoder(
snake_case , attention_mask=snake_case , head_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
UpperCAmelCase : Optional[int] = self.pooler(encoder_outputs[0] )
UpperCAmelCase : List[str] = [output_layers[self.config.num_hidden_layers - 1](snake_case )]
else:
# Early-exit inference: run layer by layer and stop once the per-layer
# prediction has been stable for `patience` consecutive layers.
UpperCAmelCase : int = 0
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : Optional[Any] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
UpperCAmelCase : Tuple = self.encoder.adaptive_forward(
snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
UpperCAmelCase : Any = self.pooler(snake_case )
UpperCAmelCase : int = output_layers[i](snake_case )
if regression:
# stability = successive predictions within the regression threshold
UpperCAmelCase : Optional[Any] = logits.detach()
if patient_result is not None:
UpperCAmelCase : Union[str, Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
UpperCAmelCase : Optional[Any] = 0
else:
# stability = identical argmax labels on successive layers
UpperCAmelCase : Any = logits.detach().argmax(dim=1 )
if patient_result is not None:
UpperCAmelCase : Tuple = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(snake_case ) ):
patient_counter += 1
else:
UpperCAmelCase : str = 0
UpperCAmelCase : int = logits
if patient_counter == self.patience:
break
UpperCAmelCase : int = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
    "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n    the pooled output) e.g. for GLUE tasks. " , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
    """PABEE sequence classification/regression head: one linear classifier per
    transformer layer; the loss weights deeper layers more heavily.

    Bugs fixed vs. the obfuscated original: ``__init__`` bound its modules to
    the placeholder ``UpperCAmelCase`` instead of ``self.*`` and read the
    undefined name ``config``; the forward signature declared ``snake_case``
    seven times (a SyntaxError); the forward decorator received an undefined
    name instead of the imported ``BERT_INPUTS_DOCSTRING``.
    """

    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        # one classification head per hidden layer (PABEE exits at any layer)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , ):
        """Return ``(logits,)`` or ``(loss, logits)`` when ``labels`` is given.

        The per-layer losses are combined with weights ``ix + 1`` so that later
        (deeper) layers contribute more, normalized by the total weight.
        """
        logits = self.bert(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
        outputs = (logits[-1],)
        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits ):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
| 679 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the map from canonical DPR checkpoint names to their
# hosted config.json URLs (context encoder, question encoder and reader, in
# both single-NQ and multiset variants).
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class _SCREAMING_SNAKE_CASE ( lowercase__ ):
    """Configuration for DPR encoder/reader models (BERT-style hyperparameters
    plus an optional output projection dimension).

    Bugs fixed vs. the obfuscated original: ``__init__`` declared
    ``snake_case_`` for every parameter (a SyntaxError) and bound each value to
    a throwaway local ``A`` instead of setting it on ``self``.
    """

    # model identifier used by the auto classes
    lowerCamelCase_ = "dpr"

    def __init__(
        self ,
        vocab_size=3_0522 ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=2 ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-12 ,
        pad_token_id=0 ,
        position_embedding_type="absolute" ,
        projection_dim=0 ,
        **kwargs ,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # dimension of the optional projection on top of the pooled output;
        # 0 means no projection layer
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def lowercase ( __magic_name__ ):
    """Exact Gaussian Error Linear Unit: x * Phi(x) computed with erf.

    Bug fixed: the obfuscated original bound intermediates to ``UpperCAmelCase``
    while reading the undefined names ``x`` and ``cdf``.
    """
    x = tf.convert_to_tensor(__magic_name__ )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def lowercase ( __magic_name__ ):
    """tanh approximation of GELU (the GPT-2 "gelu_new" variant).

    Bug fixed: the obfuscated original bound intermediates to ``UpperCAmelCase``
    while reading the undefined names ``x``, ``pi``, ``coeff`` and ``cdf``.
    """
    x = tf.convert_to_tensor(__magic_name__ )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def lowercase ( __magic_name__ ):
    """Mish activation: x * tanh(softplus(x)).

    Bug fixed: the converted tensor was bound to ``UpperCAmelCase`` while the
    return expression read the undefined name ``x``.
    """
    x = tf.convert_to_tensor(__magic_name__ )
    return x * tf.tanh(tf.math.softplus(x ) )
def lowercase ( __magic_name__ ):
    """Fast tanh approximation of GELU ("gelu_fast").

    Bugs fixed: intermediates were bound to ``UpperCAmelCase`` while the return
    read undefined names, and the two distinct constants (0.044715 and
    sqrt(2/pi) ~= 0.7978845608) had collapsed into one placeholder ``coeffa``.
    """
    x = tf.convert_to_tensor(__magic_name__ )
    coeff_a = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    coeff_b = tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_b * (1.0 + coeff_a * x * x) ))
def lowercase ( __magic_name__ ):
    """Quick GELU: x * sigmoid(1.702 * x).

    Bug fixed: intermediates were bound to ``UpperCAmelCase`` while the return
    expression read the undefined names ``x`` and ``coeff``.
    """
    x = tf.convert_to_tensor(__magic_name__ )
    coeff = tf.cast(1.7_0_2 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def lowercase ( __magic_name__ ):
    """Gelu clipped to the range [-10, 10] ("gelu_10"), useful for
    quantization-friendly models."""
    clipped_gelu = tf.clip_by_value(_gelu(__magic_name__ ) , -10 , 10 )
    return clipped_gelu
def lowercase ( __magic_name__ , axis=-1 ):
    """Gated Linear Unit: split the input in two halves along ``axis`` and
    gate the first half with the sigmoid of the second.

    Bugs fixed: the original declared ``__magic_name__`` twice (a SyntaxError),
    passed the input tensor as the split axis, and read the undefined names
    ``a`` and (implicitly) the second half.
    """
    a , b = tf.split(__magic_name__ , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
# TF >= 2.4 ships a native (optionally approximate) keras gelu; older versions
# fall back to the hand-written implementations above.
# NOTE(review): obfuscated dump — the selected implementations are bound to
# throwaway names (`a : Tuple = ...`) while the ACT2FN table below reads the
# original names (`gelu`, `gelu_aa`, `gelu_fast`, ...), and the functions above
# are all named `lowercase`; the bindings must be restored for this to run.
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def lowercase ( __magic_name__ ):
'''simple docstring'''
return tf.keras.activations.gelu(__magic_name__ , approximate=__magic_name__ )
a : Tuple = tf.keras.activations.gelu
a : Dict = approximate_gelu_wrap
else:
a : List[str] = _gelu
a : List[Any] = _gelu_new
# String name -> activation callable table consumed by the lookup helper below.
a : Optional[int] = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def lowercase ( __magic_name__ ):
    """Look up an activation function by name in the ACT2FN table.

    Raises KeyError with the list of known names if the lookup fails.

    Bug fixed: the original tested and formatted the undefined name
    ``activation_string`` instead of the parameter.
    """
    if __magic_name__ in ACTaFN:
        return ACTaFN[__magic_name__]
    raise KeyError(F"function {__magic_name__} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
| 679 | 0 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class UpperCamelCase_ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
    """Learning-rate schedule that linearly (or polynomially, via ``power``)
    warms up to ``initial_learning_rate`` over ``warmup_steps`` and then
    delegates to ``decay_schedule_fn``.

    Bugs fixed vs. the obfuscated original: ``__init__`` declared
    ``lowerCamelCase`` five times (a SyntaxError) and bound everything to a
    throwaway local instead of ``self.*``; ``__call__`` used the garbled dtype
    ``tf.floataa`` and passed the wrong name to ``tf.cond``; ``get_config``
    had been renamed away, which breaks Keras serialization.
    """

    def __init__( self , initial_learning_rate , decay_schedule_fn , warmup_steps , power=1.0 , name=None , ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__( self , step ):
        with tf.name_scope(self.name or 'WarmUp' ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )

    def get_config( self ):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def _SCREAMING_SNAKE_CASE ( init_lr ,num_train_steps ,num_warmup_steps ,min_lr_ratio=0.0 ,adam_beta1=0.9 ,adam_beta2=0.999 ,adam_epsilon=1e-8 ,adam_clipnorm=None ,adam_global_clipnorm=None ,weight_decay_rate=0.0 ,power=1.0 ,include_in_weight_decay=None ,):
    """Build an optimizer + LR schedule: polynomial decay from ``init_lr`` to
    ``init_lr * min_lr_ratio`` over the post-warmup steps, optionally wrapped
    in a WarmUp schedule, driving AdamWeightDecay (when ``weight_decay_rate``
    > 0) or plain Adam. Returns ``(optimizer, lr_schedule)``.

    Bugs fixed vs. the obfuscated original: the signature declared
    ``lowerCAmelCase__`` eleven times and the Adam constructor calls repeated
    the keyword ``beta_a`` (both SyntaxErrors), and intermediates were bound
    to a placeholder while the return read ``optimizer``/``lr_schedule``.
    """
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=power ,)
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr ,decay_schedule_fn=lr_schedule ,warmup_steps=num_warmup_steps ,)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule ,weight_decay_rate=weight_decay_rate ,beta_1=adam_beta1 ,beta_2=adam_beta2 ,epsilon=adam_epsilon ,clipnorm=adam_clipnorm ,global_clipnorm=adam_global_clipnorm ,exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] ,include_in_weight_decay=include_in_weight_decay ,)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule ,beta_1=adam_beta1 ,beta_2=adam_beta2 ,epsilon=adam_epsilon ,clipnorm=adam_clipnorm ,global_clipnorm=adam_global_clipnorm ,)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
# NOTE(review): obfuscated dump of an Adam-with-decoupled-weight-decay
# optimizer (AdamW for Keras). Locals are bound to `lowerCamelCase_` while
# later code reads the intended names (`self.weight_decay_rate`,
# `self._include_in_weight_decay`, ...); bindings must be restored to run.
class UpperCamelCase_ ( lowercase__ ):
# Stores the decay rate plus include/exclude regex lists that control which
# variables (by name) receive weight decay.
def __init__( self : Tuple , lowerCamelCase : Optional[Any] = 0.001 , lowerCamelCase : List[str] = 0.9 , lowerCamelCase : Any = 0.999 , lowerCamelCase : Union[str, Any] = 1E-7 , lowerCamelCase : Optional[Any] = False , lowerCamelCase : Union[str, Any] = 0.0 , lowerCamelCase : Union[str, Any] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Dict = "AdamWeightDecay" , **lowerCamelCase : Optional[Any] , ):
super().__init__(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase )
lowerCamelCase_ : List[str] = weight_decay_rate
lowerCamelCase_ : Tuple = include_in_weight_decay
lowerCamelCase_ : Optional[int] = exclude_from_weight_decay
# deserialization needs the WarmUp schedule registered as a custom object
@classmethod
def __a ( cls : Tuple , lowerCamelCase : Union[str, Any] ):
lowerCamelCase_ : Union[str, Any] = {"WarmUp": WarmUp}
return super(lowerCamelCase , cls ).from_config(lowerCamelCase , custom_objects=lowerCamelCase )
def __a ( self : Union[str, Any] , lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : str ):
super(lowerCamelCase , self )._prepare_local(lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : int = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
# decoupled weight decay applied before the Adam update (AdamW)
def __a ( self : str , lowerCamelCase : List[Any] , lowerCamelCase : List[str] , lowerCamelCase : Dict ):
lowerCamelCase_ : Optional[Any] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def __a ( self : Any , lowerCamelCase : List[Any] , lowerCamelCase : Tuple=None , **lowerCamelCase : Any ):
lowerCamelCase_ : Dict = list(zip(*lowerCamelCase ) )
return super(lowerCamelCase , self ).apply_gradients(zip(lowerCamelCase , lowerCamelCase ) , name=lowerCamelCase , **lowerCamelCase )
# fetch (and cache) the per-device/dtype learning-rate coefficients
def __a ( self : List[str] , lowerCamelCase : Dict , lowerCamelCase : str , lowerCamelCase : Optional[int] ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
lowerCamelCase_ : Union[str, Any] = apply_state or {}
lowerCamelCase_ : Optional[int] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
lowerCamelCase_ : List[str] = self._fallback_apply_state(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Dict = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
# dense/sparse update paths: apply weight decay, then the base Adam step
def __a ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Dict , lowerCamelCase : int=None ):
lowerCamelCase_ : Tuple = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase )
lowerCamelCase_ : int = self._decay_weights_op(lowerCamelCase , lowerCamelCase , lowerCamelCase )
with tf.control_dependencies([decay] ):
return super(lowerCamelCase , self )._resource_apply_dense(lowerCamelCase , lowerCamelCase , **lowerCamelCase )
def __a ( self : int , lowerCamelCase : str , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : str=None ):
lowerCamelCase_ : Union[str, Any] = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase )
lowerCamelCase_ : Optional[Any] = self._decay_weights_op(lowerCamelCase , lowerCamelCase , lowerCamelCase )
with tf.control_dependencies([decay] ):
return super(lowerCamelCase , self )._resource_apply_sparse(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase )
def __a ( self : Any ):
lowerCamelCase_ : Optional[Any] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
# decide per-variable (by regex on the name) whether weight decay applies
def __a ( self : List[str] , lowerCamelCase : int ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowerCamelCase , lowerCamelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowerCamelCase , lowerCamelCase ) is not None:
return False
return True
# NOTE(review): obfuscated dump of a gradient accumulator: sums gradients
# across calls (for large effective batch sizes) until `reset()`. Locals are
# bound to `lowerCamelCase_` while later code reads `self._gradients` /
# `self._accum_steps`; the bindings must be restored before use.
class UpperCamelCase_ ( lowercase__ ):
def __init__( self : Dict ):
lowerCamelCase_ : Dict = []
lowerCamelCase_ : List[Any] = None
# number of accumulation steps so far (lazily creates the counter variable)
@property
def __a ( self : List[str] ):
if self._accum_steps is None:
lowerCamelCase_ : Optional[Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=lowerCamelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
# current accumulated gradients; must be called at least once first
@property
def __a ( self : str ):
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
# accumulate one batch of gradients (creating zeroed slots on first call)
def __call__( self : int , lowerCamelCase : Optional[int] ):
if not self._gradients:
lowerCamelCase_ : int = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowerCamelCase ) , trainable=lowerCamelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowerCamelCase ) != len(self._gradients ):
raise ValueError(F"Expected {len(self._gradients )} gradients, but got {len(lowerCamelCase )}" )
for accum_gradient, gradient in zip(self._gradients , lowerCamelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowerCamelCase )
self._accum_steps.assign_add(1 )
# zero the step counter and all accumulated gradients
def __a ( self : Any ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowerCamelCase ) )
| 364 |
'''simple docstring'''
from __future__ import annotations
class UpperCamelCase__ :
    """N-th order IIR filter in direct form I.

    ``process`` implements y[n] = (b0*x[n] + sum(b_i*x[n-i] - a_i*y[n-i])) / a0.

    Bugs fixed vs. the obfuscated original: ``__init__`` bound every attribute
    to the placeholder ``UpperCAmelCase`` (so ``self.order`` etc. were never
    set), ``set_coefficients`` declared ``snake_case`` twice (a SyntaxError),
    and both methods were named ``A_`` so the second shadowed the first.
    """

    def __init__( self , snake_case ):
        self.order = snake_case
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * snake_case
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * snake_case
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients( self , a_coeffs , b_coeffs ):
        """Set feedback (a) and feedforward (b) coefficients; a_0 defaults to
        1.0 when only ``order`` a-coefficients are supplied. Raises ValueError
        if either list does not have ``order + 1`` entries."""
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs )}"
            )
        if len(b_coeffs ) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs )}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process( self , sample ):
        """Filter a single sample and return the filtered value."""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # shift the delay lines and store the newest input/output
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
| 679 | 0 |
def __magic_name__ ( lowercase_ ) -> int:
    """Count inversions of *lowercase_* by brute force in O(n^2).

    An inversion is an index pair (i, j) with i < j and a[i] > a[j].
    Returns 0 for an empty or sorted sequence.
    """
    num_inversions = 0
    n = len(lowercase_ )
    for i in range(n - 1 ):
        # the original's inner bound was the list itself, not its length
        for j in range(i + 1 , n ):
            if lowercase_[i] > lowercase_[j]:
                num_inversions += 1
    return num_inversions
def __magic_name__ ( lowercase_ ) -> tuple:
    """Count inversions with a merge-sort sweep in O(n log n).

    Returns ``(sorted_copy, inversion_count)``.  The merge step is a local
    helper, so the function is self-contained (the original called sibling
    names that do not exist in this module after obfuscation).
    """

    def _merge_count(p , q ):
        # merge two sorted runs, counting each pair p[i] > q[j] once
        merged = []
        i = j = cross = 0
        while i < len(p ) and j < len(q ):
            if p[i] > q[j]:
                # p is sorted, so every remaining p element also exceeds q[j]
                cross += len(p ) - i
                merged.append(q[j] )
                j += 1
            else:
                merged.append(p[i] )
                i += 1
        merged.extend(p[i:] if i < len(p ) else q[j:] )
        return merged, cross

    def _recurse(arr ):
        if len(arr ) <= 1:
            return arr, 0
        mid = len(arr ) // 2
        left, left_count = _recurse(arr[0:mid] )
        right, right_count = _recurse(arr[mid:] )
        merged, cross_count = _merge_count(left , right )
        return merged, left_count + right_count + cross_count

    return _recurse(lowercase_ )
def __magic_name__ ( p , q ) -> tuple:
    """Merge two sorted sequences and count cross inversions (p[i] > q[j]).

    Returns ``(merged_sorted_list, cross_inversion_count)``.  The original
    repeated one parameter name (a SyntaxError) and read p/q/j undefined.
    """
    r = []
    i = j = num_inversion = 0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p).
            # These are all inversions; the claim follows from p being sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1
    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )
    return r, num_inversion
def __magic_name__ ( ) -> None:
    """Demo driver: checks the brute-force and recursive inversion counters agree.

    NOTE(review): `count_inversions_bf`, `count_inversions_recursive`,
    `num_inversions_bf`, `num_inversions_recursive`, `arr_a`, `lowercase_`
    and `main` are not defined in this module under these names (the sibling
    functions are all bound to `__magic_name__`, and results are assigned to
    `UpperCamelCase`), so this driver raises NameError as written — looks
    like an automated-renaming artifact; confirm against the upstream source.
    """
    UpperCamelCase = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    UpperCamelCase = count_inversions_bf(lowercase_ )
    UpperCamelCase = count_inversions_recursive(lowercase_ )
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = " , lowercase_ )
    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    UpperCamelCase = count_inversions_bf(lowercase_ )
    UpperCamelCase = count_inversions_recursive(lowercase_ )
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = " , lowercase_ )
    # an empty list should also have zero inversions
    UpperCamelCase = []
    UpperCamelCase = count_inversions_bf(lowercase_ )
    UpperCamelCase = count_inversions_recursive(lowercase_ )
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = " , lowercase_ )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined here either; the driver above is
    # bound to `__magic_name__`.
    main()
| 606 |
'''simple docstring'''
import argparse
from collections import defaultdict
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = F"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Tuple = f.readlines()
UpperCAmelCase : Tuple = F"class {class_name}("
UpperCAmelCase : str = F"{4 * ' '}def {test_name}("
UpperCAmelCase : Dict = F"{8 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Tuple = F"{16 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : List[str] = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Tuple = 0
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = []
for line in lines:
if line.startswith(__magic_name__ ):
UpperCAmelCase : int = True
elif in_class and line.startswith(__magic_name__ ):
UpperCAmelCase : Dict = True
elif in_class and in_func and (line.startswith(__magic_name__ ) or line.startswith(__magic_name__ )):
UpperCAmelCase : List[str] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"{spaces * ' '}{correct_line}" )
UpperCAmelCase : List[str] = False
else:
new_lines.append(__magic_name__ )
with open(__magic_name__ , "w" ) as f:
for line in new_lines:
f.write(__magic_name__ )
def lowercase ( correct_filename , fail_filename=None ):
    """Apply every correction listed in *correct_filename*.

    Each line of the corrections file is a ``file;class;test;correct_line``
    record.  When *fail_filename* is given, only tests listed there (as
    ``file::class::test``) are rewritten.  (The original repeated the
    parameter name — a SyntaxError — and lost the record unpacking.)
    """
    if fail_filename is not None:
        with open(fail_filename , "r" ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename , "r" ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";" )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            # NOTE(review): in this module the overwrite helper is bound to
            # `lowercase`, not `overwrite_file` — confirm the intended name
            # against the upstream script before running.
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
a : List[Any] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 679 | 0 |
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __UpperCamelCase ( unittest.TestCase ):
    """Tests for the `transformers.utils.logging` verbosity machinery.

    NOTE(review): this block carries automated-renaming artifacts — results
    are bound to the dummy name `_lowerCAmelCase` while the code then reads
    `logger`, `level_origin`, `msg`, `env_level_str`, `env_level`,
    `current_level` and the placeholder `_A` undefined; and all five methods
    share the name `__lowerCamelCase`, so only the last survives on the
    class.  Confirm each against the upstream test module before running.
    """
    def __lowerCamelCase ( self ):
        """set_verbosity_{error,warning,info,debug} should drive the root logger level."""
        _lowerCAmelCase : Any = logging.get_logger()
        # the current default level is logging.WARNING
        _lowerCAmelCase : Dict = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(_A )
    def __lowerCamelCase ( self ):
        """Raising verbosity to error should suppress warnings; lowering restores them."""
        _lowerCAmelCase : str = logging.get_verbosity()
        _lowerCAmelCase : Union[str, Any] = logging.get_logger('transformers.models.bart.tokenization_bart' )
        _lowerCAmelCase : Dict = "Testing 1, 2, 3"
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(_A ) as cl:
                logger.warning(_A )
            self.assertEqual(cl.out ,msg + '\n' )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(_A ) as cl:
            logger.warning(_A )
        self.assertEqual(cl.out ,'' )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(_A ) as cl:
            logger.warning(_A )
        self.assertEqual(cl.out ,msg + '\n' )
        # restore to the original level
        logging.set_verbosity(_A )
    @mockenv(TRANSFORMERS_VERBOSITY='error' )
    def __lowerCamelCase ( self ):
        """A valid TRANSFORMERS_VERBOSITY env var should set the internal verbosity."""
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _lowerCAmelCase : str = logging.get_logger('transformers.models.bart.tokenization_bart' )
        _lowerCAmelCase : List[Any] = os.getenv('TRANSFORMERS_VERBOSITY' ,_A )
        _lowerCAmelCase : Any = logging.log_levels[env_level_str]
        _lowerCAmelCase : str = logging.get_verbosity()
        self.assertEqual(
            _A ,_A ,F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" ,)
        # restore to the original level
        _lowerCAmelCase : Any = ""
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY='super-error' )
    def __lowerCamelCase ( self ):
        """An unknown TRANSFORMERS_VERBOSITY value should be reported on the root logger."""
        transformers.utils.logging._reset_library_root_logger()
        _lowerCAmelCase : List[str] = logging.logging.getLogger()
        with CaptureLogger(_A ) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart' )
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' ,cl.out )
        # no need to restore as nothing was changed
    def __lowerCamelCase ( self ):
        """TRANSFORMERS_NO_ADVISORY_WARNINGS should gate `warning_advice` output."""
        transformers.utils.logging._reset_library_root_logger()
        _lowerCAmelCase : str = logging.get_logger('transformers.models.bart.tokenization_bart' )
        _lowerCAmelCase : List[Any] = "Testing 1, 2, 3"
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(_A ) as cl:
                logger.warning_advice(_A )
            self.assertEqual(cl.out ,'' )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(_A ) as cl:
                logger.warning_advice(_A )
            self.assertEqual(cl.out ,msg + '\n' )
def lowerCamelCase__ ( ):
    """Toggle the hub progress bars off then on, checking the reported state each time."""
    expectations = (
        (disable_progress_bar, True),
        (enable_progress_bar, False),
    )
    for toggle, should_be_disabled in expectations:
        toggle()
        assert bool(are_progress_bars_disabled()) == should_be_disabled
| 259 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase__ :
    """A binary-tree node: a coin count plus optional left/right children.

    The original annotated all three fields with one shared dummy name, so
    only a single field survived; the sibling function reads `.data`,
    `.left` and `.right`, restored here.  (Relies on the module's
    ``from __future__ import annotations`` for the self-referential type.)
    """

    data : int
    left : UpperCamelCase__ | None = None
    right : UpperCamelCase__ | None = None
# Result record for the coin-distribution walk: (moves, excess).
# NOTE(review): the namedtuple is bound to `a`, but the sibling function
# references `CoinsDistribResult` — an automated-renaming artifact; confirm
# the intended binding against the upstream source.
a : Optional[Any] = namedtuple("CoinsDistribResult", "moves excess")
def lowercase ( __magic_name__ ):
    """Minimum moves to leave exactly one coin on every node of a binary tree.

    A move transfers one coin between adjacent nodes (LeetCode 979,
    "Distribute Coins in Binary Tree").  Nodes must expose `.data`, `.left`
    and `.right`.  Returns 0 for an empty tree; raises ValueError when the
    total number of coins does not equal the number of nodes.
    """
    if __magic_name__ is None:
        return 0

    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(__magic_name__ ) != count_coins(__magic_name__ ):
        raise ValueError("The nodes number should be same as the number of coins" )

    # Local result record so this function does not depend on module globals
    # (the module-level namedtuple was rebound to `a` by obfuscation).
    coins_distrib_result = namedtuple("CoinsDistribResult" , "moves excess" )

    # Main calculation
    def get_distrib(node ):
        if node is None:
            # an absent child neither needs nor supplies coins beyond its slot
            return coins_distrib_result(0 , 1 )
        left_distrib_moves, left_distrib_excess = get_distrib(node.left )
        right_distrib_moves, right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return coins_distrib_result(distrib_moves , distrib_excess )

    return get_distrib(__magic_name__ )[0]
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 679 | 0 |
def a_ ( __lowerCAmelCase = 10**12 ):
    """Project Euler 100: blue discs in the first arrangement exceeding a size.

    Returns the number of blue discs for the first box of more than
    *__lowerCAmelCase* discs in total where drawing two blue discs has
    probability exactly 1/2.  Walks the Pell-like recurrence for the
    convergents of sqrt(2): `numerator` tracks 2*total - 1 and
    `denominator` tracks 2*blue - 1.  (The original assigned all four
    state variables to one dummy name and then read them undefined.)
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * __lowerCAmelCase - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
    # The solver in this module is bound to `a_`, not `solution` (which the
    # original referenced and which raised NameError).
    print(F"{a_() = }")
| 615 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
# NOTE(review): every constant below is bound to the single name `a`, so
# each assignment clobbers the previous one (originally: the module logger,
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES).  The class below reads those
# original names, which are undefined here — an automated-renaming artifact.
a : List[Any] = logging.get_logger(__name__)
a : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a : int = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}
a : Any = {
    "allenai/led-base-16384": 1_63_84,
}
class UpperCamelCase__ ( lowercase__ ):
    """Fast (Rust-backed) tokenizer wrapper for LED.

    NOTE(review): this block carries several automated-renaming artifacts —
    the base class is bound to `lowercase__`; `VOCAB_FILES_NAMES`,
    `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`
    are not defined in this module (the constants are all bound to `a`); the
    five class attributes below all share one name, so only the last
    survives; every signature repeats the parameter name `snake_case`, which
    is a SyntaxError; locals such as `pre_tok_state`, `state`, `value`,
    `output`, `cls`, `sep` are read without ever being bound (results go to
    `UpperCAmelCase`); and `@mask_token.setter` refers to a `mask_token`
    property that is never defined (the property above it is named `A_`).
    Confirm each against the upstream LED tokenizer module before running.
    """
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ : Tuple = LEDTokenizer
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["input_ids", "attention_mask"]
    def __init__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="replace" , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=False , snake_case=True , **snake_case , ):
        """Build the fast tokenizer and mirror GPT-2-style `add_prefix_space`
        and `trim_offsets` settings onto the pre-tokenizer and post-processor."""
        super().__init__(
            snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , )
        UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space:
            UpperCAmelCase : Tuple = getattr(snake_case , pre_tok_state.pop("type" ) )
            UpperCAmelCase : Any = add_prefix_space
            UpperCAmelCase : str = pre_tok_class(**snake_case )
            UpperCAmelCase : int = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        UpperCAmelCase : Dict = "post_processor"
        UpperCAmelCase : Dict = getattr(self.backend_tokenizer , snake_case , snake_case )
        if tokenizer_component_instance:
            UpperCAmelCase : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                UpperCAmelCase : int = tuple(state["sep"] )
            if "cls" in state:
                UpperCAmelCase : Union[str, Any] = tuple(state["cls"] )
            UpperCAmelCase : Tuple = False
            if state.get("add_prefix_space" , snake_case ) != add_prefix_space:
                UpperCAmelCase : Optional[Any] = add_prefix_space
                UpperCAmelCase : Optional[int] = True
            if state.get("trim_offsets" , snake_case ) != trim_offsets:
                UpperCAmelCase : Tuple = trim_offsets
                UpperCAmelCase : List[str] = True
            if changes_to_apply:
                UpperCAmelCase : Optional[Any] = getattr(snake_case , state.pop("type" ) )
                UpperCAmelCase : Tuple = component_class(**snake_case )
                setattr(self.backend_tokenizer , snake_case , snake_case )
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def A_ ( self ):
        """Mask token as a string; logs an error and returns None when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def A_ ( self , snake_case ):
        """Install the mask token, making it lstrip-aware so it absorbs the
        space before `<mask>`."""
        UpperCAmelCase : Tuple = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value
        UpperCAmelCase : Optional[Any] = value
    def A_ ( self , *snake_case , **snake_case ):
        """Reject pretokenized input unless `add_prefix_space` is set, then
        defer to the parent batch-encoding implementation."""
        UpperCAmelCase : List[str] = kwargs.get("is_split_into_words" , snake_case )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*snake_case , **snake_case )
    def A_ ( self , *snake_case , **snake_case ):
        """Reject pretokenized input unless `add_prefix_space` is set, then
        defer to the parent single-encoding implementation."""
        UpperCAmelCase : Union[str, Any] = kwargs.get("is_split_into_words" , snake_case )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*snake_case , **snake_case )
    def A_ ( self , snake_case , snake_case = None ):
        """Save the tokenizer model files to a directory; returns the file paths."""
        UpperCAmelCase : str = self._tokenizer.model.save(snake_case , name=snake_case )
        return tuple(snake_case )
    def A_ ( self , snake_case , snake_case=None ):
        """Wrap one or two sequences with `<s>`/`</s>` special tokens."""
        UpperCAmelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    def A_ ( self , snake_case , snake_case = None ):
        """Return an all-zero token-type mask for one or two sequences."""
        UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
        UpperCAmelCase : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def A_ ( self , snake_case , snake_case = None , snake_case = PaddingStrategy.DO_NOT_PAD , snake_case = None , snake_case = None , ):
        """Pad as the parent does, then extend `global_attention_mask` with
        -1 entries so it matches the padded input length."""
        UpperCAmelCase : int = super()._pad(
            encoded_inputs=snake_case , max_length=snake_case , padding_strategy=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , )
        # Load from model defaults
        if return_attention_mask is None:
            UpperCAmelCase : int = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            UpperCAmelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            UpperCAmelCase : Optional[int] = len(encoded_inputs["global_attention_mask"] ) != len(snake_case )
            if needs_to_be_padded:
                UpperCAmelCase : Tuple = len(snake_case ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    UpperCAmelCase : List[str] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    UpperCAmelCase : Any = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 679 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both constants are bound to one name, so the module logger is
# clobbered by the pretrained-config URL map — an automated-renaming artifact.
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class __magic_name__ ( lowercase__ ):
    """Configuration for a DonutSwin encoder: image/patch/window geometry,
    per-stage depths and attention-head counts.

    NOTE(review): the base class is bound to the obfuscated name
    `lowercase__` (presumably `PretrainedConfig`) — confirm against the
    upstream module.  The original repeated the parameter name
    `__snake_case` sixteen times (a SyntaxError), assigned every value to a
    local instead of `self`, and collapsed `model_type`/`attribute_map`
    into one class attribute.
    """

    model_type = "donut-swin"
    # map canonical config names onto this model's attribute names
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , **kwargs , ):
        """Store the configuration; unknown keyword arguments go to the base
        class.  The list defaults mirror the upstream signature and are
        treated as read-only."""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
| 242 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowercase ( __magic_name__="" ):
    """Return a fresh path inside a new temporary directory whose basename is
    a random UUID followed by *__magic_name__* (e.g. a ".wav" suffix).

    Fixes the original's call to the nonexistent `uuid.uuida()` (uuid4) and
    its reads of the undefined names `suffix` and the join base.
    """
    directory = tempfile.mkdtemp()
    return os.path.join(directory , str(uuid.uuid4() ) + __magic_name__ )
@require_soundfile
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """AgentAudio round-trip tests (tensor -> serialized file -> tensor).

    NOTE(review): obfuscation artifacts — `snake_case` and `agent_type` are
    read where results were bound to `UpperCAmelCase`; `torch.floataa` is
    not a real dtype (presumably `torch.float64`); the annotated tuple
    assignment below is a SyntaxError; `get_new_path` exists in this module
    only under the name `lowercase`; and both methods share the name `A_`,
    so the first is shadowed.  Confirm against the upstream test module.
    """
    def A_ ( self ):
        """A tensor wrapped in AgentAudio should serialize to a file that reads back equal."""
        UpperCAmelCase : Any = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
        UpperCAmelCase : int = AgentAudio(snake_case )
        UpperCAmelCase : str = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(snake_case , agent_type.to_raw() , atol=1e-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(snake_case ) )
        # Ensure that the file contains the same value as the original tensor
        UpperCAmelCase , UpperCAmelCase : str = sf.read(snake_case )
        self.assertTrue(torch.allclose(snake_case , torch.tensor(snake_case ) , atol=1e-4 ) )
    def A_ ( self ):
        """An AgentAudio built from an existing wav path should keep that path and data."""
        UpperCAmelCase : Union[str, Any] = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
        UpperCAmelCase : Any = get_new_path(suffix=".wav" )
        sf.write(snake_case , snake_case , 1_6_0_0_0 )
        UpperCAmelCase : Optional[Any] = AgentAudio(snake_case )
        self.assertTrue(torch.allclose(snake_case , agent_type.to_raw() , atol=1e-4 ) )
        self.assertEqual(agent_type.to_string() , snake_case )
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """AgentImage tests: construction from tensors, PIL images, and paths.

    NOTE(review): obfuscation artifacts — `snake_case`, `path`, `image` and
    `agent_type` are read where results were bound to `UpperCAmelCase`, and
    all three methods share the name `A_`, so only the last survives.
    Confirm against the upstream test module before running.
    """
    def A_ ( self ):
        """A tensor-backed AgentImage should round-trip and persist its file."""
        UpperCAmelCase : Optional[Any] = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3) )
        UpperCAmelCase : Tuple = AgentImage(snake_case )
        UpperCAmelCase : Tuple = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(snake_case , agent_type._tensor , atol=1e-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(snake_case ) )
    def A_ ( self ):
        """An AgentImage built from a path should reuse that same file."""
        UpperCAmelCase : Optional[Any] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        UpperCAmelCase : Any = Image.open(snake_case )
        UpperCAmelCase : List[str] = AgentImage(snake_case )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(snake_case ) )
    def A_ ( self ):
        """An AgentImage built from a PIL image should serialize to a new file."""
        UpperCAmelCase : Union[str, Any] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        UpperCAmelCase : Dict = Image.open(snake_case )
        UpperCAmelCase : int = AgentImage(snake_case )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(snake_case ) )
class UpperCamelCase__ ( unittest.TestCase ):
    """Checks that AgentText round-trips its payload through to_string/to_raw
    and compares equal to the underlying string."""

    def A_ ( self ):
        """An AgentText built from a str behaves like that str.

        The original read the undefined names `snake_case`/`agent_type`
        (results were bound to throwaway locals) — restored here.
        """
        text = "Hey!"
        agent_type = AgentText(text )
        self.assertEqual(text , agent_type.to_string() )
        self.assertEqual(text , agent_type.to_raw() )
        self.assertEqual(text , agent_type )
| 679 | 0 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A : List[Any] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DebertaVaTokenizer
lowerCamelCase__ = DebertaVaTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def _snake_case ( self : Union[str, Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE = DebertaVaTokenizer(__lowerCamelCase , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : Optional[Any] , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = "this is a test"
SCREAMING_SNAKE_CASE = "this is a test"
return input_text, output_text
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = "<pad>"
SCREAMING_SNAKE_CASE = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(__lowerCamelCase ) , 30001 )
def _snake_case ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = " \tHeLLo!how \n Are yoU? "
SCREAMING_SNAKE_CASE = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
SCREAMING_SNAKE_CASE = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase )
SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def _snake_case ( self : Union[str, Any] ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def _snake_case ( self : Union[str, Any] ):
pass
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
SCREAMING_SNAKE_CASE = DebertaVaTokenizer(__lowerCamelCase , split_by_punct=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = DebertaVaTokenizerFast(__lowerCamelCase , split_by_punct=__lowerCamelCase )
SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
SCREAMING_SNAKE_CASE = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
SCREAMING_SNAKE_CASE = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
SCREAMING_SNAKE_CASE = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = " \tHeLLo!how \n Are yoU? "
SCREAMING_SNAKE_CASE = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
SCREAMING_SNAKE_CASE = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
    def _snake_case ( self : Any ):
        """Full tokenizer fixture test: ids, tokens, and id->token round-trips.

        NOTE(review): the fixtures below (expected ids / tokens for two input
        sentences) look intact, but the intermediate bindings were collapsed
        into one name and the assertions compare ``__lowerCamelCase`` with
        itself; ``tokenizer``/``rust_tokenizer`` are undefined in this scope.
        Verify against the upstream DeBERTa-v2 tokenizer tests.
        """
        SCREAMING_SNAKE_CASE = "This is a test"
        # Expected ids; token "T" is out-of-vocab for the fast tokenizer fixture.
        SCREAMING_SNAKE_CASE = [13, 1, 4398, 25, 21, 1289]
        SCREAMING_SNAKE_CASE = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        SCREAMING_SNAKE_CASE = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
        SCREAMING_SNAKE_CASE = DebertaVaTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
        SCREAMING_SNAKE_CASE = DebertaVaTokenizerFast(__lowerCamelCase , keep_accents=__lowerCamelCase )
        # Slow tokenizer: encode, tokenize, and convert ids back to tokens.
        SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        # Fast tokenizer: same three checks.
        SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        # fmt: off
        # Second fixture: accented sentence with numerals and punctuation.
        SCREAMING_SNAKE_CASE = "I was born in 92000, and this is falsé."
        SCREAMING_SNAKE_CASE = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        SCREAMING_SNAKE_CASE = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        SCREAMING_SNAKE_CASE = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on
        SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        SCREAMING_SNAKE_CASE = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
    def _snake_case ( self : Dict ):
        """Check special-token insertion for single and paired sequences.

        Expected layout: [CLS] seq [SEP] and [CLS] seq_a [SEP] seq_b [SEP].
        NOTE(review): ``tokenizer``, ``text`` and ``text_a`` are undefined in
        this scope (bindings collapsed into one name) — verify against the
        upstream test before relying on this method.
        """
        SCREAMING_SNAKE_CASE = DebertaVaTokenizer(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.encode("sequence builders" )
        SCREAMING_SNAKE_CASE = tokenizer.encode("multi-sequence build" )
        # Single-sequence and paired-sequence special-token layouts.
        SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __lowerCamelCase )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __lowerCamelCase , )
    @slow
    def _snake_case ( self : Any ):
        """Slow integration test: full encoding (ids, token types, attention mask)
        against a pinned microsoft/deberta-v2-xlarge revision.

        The literal below is the expected model input for three padded sequences.
        """
        # fmt: off
        SCREAMING_SNAKE_CASE = {"input_ids": [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCamelCase , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( tf_checkpoint_path , config_path , pytorch_dump_path ):
    """Convert a TensorFlow 2 "token dropping" BERT checkpoint to PyTorch.

    Loads every variable from the TF checkpoint at ``tf_checkpoint_path``,
    copies it into a ``BertForMaskedLM`` built from the JSON config at
    ``config_path``, and saves the result to ``pytorch_dump_path``.

    Fixes vs. the previous version: the three parameters were all named
    ``__magic_name__`` (a SyntaxError), and every loaded tensor was bound to a
    throwaway local instead of being assigned into the model's parameters.
    """

    def get_masked_lm_array(name):
        # One LM-head variable; TF stores kernels transposed vs. PyTorch Linear.
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name):
        # One encoder-level (not per-layer) variable.
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index, name):
        # One per-transformer-layer variable.
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index, name, orginal_shape):
        # Attention weights are stored per-head in TF; reshape to the flat
        # PyTorch layout before the kernel transpose.
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(orginal_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    # Word embeddings are tied to the masked-LM embedding table.
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done sucessfully!")
if __name__ == "__main__":
    # CLI entry point for the token-dropping BERT -> PyTorch converter.
    # Previously the ArgumentParser and parsed namespace were bound to `a`
    # while the code referenced undefined `parser`/`args`, and the final call
    # targeted a nonexistent `convert_checkpoint_to_pytorch`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    # The converter above is named `lowercase` in this file; call it directly.
    lowercase(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 679 | 0 |
from __future__ import annotations
class __lowercase :
    """Direct-form-I IIR digital filter of a fixed order.

    Implements y[n] = (b0*x[n] + sum_i(b_i*x[n-i]) - sum_i(a_i*y[n-i])) / a0.

    Fixes vs. the previous version: `__init__` bound everything to throwaway
    locals (no attributes were ever set), both methods were named
    ``snake_case`` (the coefficient setter was shadowed and its duplicate
    parameter names were a SyntaxError), and the history shift assigned to
    locals instead of the state lists.
    """

    def __init__( self , order ) -> None:
        # Filter order k.
        self.order = order
        # Denominator coefficients a_0 .. a_k (a_0 defaults to 1.0).
        self.a_coeffs = [1.0] + [0.0] * order
        # Numerator coefficients b_0 .. b_k.
        self.b_coeffs = [1.0] + [0.0] * order
        # Input history x[n-1] .. x[n-k].
        self.input_history = [0.0] * self.order
        # Output history y[n-1] .. y[n-k].
        self.output_history = [0.0] * self.order

    def set_coefficients( self , a_coeffs , b_coeffs ) -> None:
        """Install new coefficients; a missing leading a_0 is padded with 1.0.

        Raises ValueError when either list does not have order + 1 elements.
        """
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_message = (
                f'Expected a_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(a_coeffs )}'
            )
            raise ValueError(error_message )
        if len(b_coeffs ) != self.order + 1:
            error_message = (
                f'Expected b_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(b_coeffs )}'
            )
            raise ValueError(error_message )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def snake_case ( self , sample ) -> float:
        """Feed one input sample through the filter and return the output."""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift histories by one and insert the newest sample/output in front.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
| 542 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
# NOTE(review): every constant below is bound to the SAME name `a`, so each
# assignment clobbers the previous one and only the last regex survives; the
# `_re_*` names the parser functions reference are therefore undefined.
# Presumably these were PATH_TO_TRANSFORMERS and the `_re_*` patterns before a
# refactor — restore distinct names before running.
# Also note the two patterns built from NON-raw strings below rely on Python
# passing `\s`/`\[` through unchanged (a DeprecationWarning on newer Pythons).
a : str = "src/transformers"
# Matches is_xxx_available()
a : Union[str, Any] = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
a : int = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a : Any = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
a : Dict = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
a : Any = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a : List[str] = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
a : Union[str, Any] = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
a : List[str] = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
a : Any = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
a : Union[str, Any] = re.compile(R"^\s*try:")
# Catches a line with else:
a : Tuple = re.compile(R"^\s*else:")
def lowercase ( __magic_name__ ):
    """Extract the backend key from an `if not is_xxx_available():` line.

    Returns the sorted backend names joined with "_and_" (e.g.
    "tf_and_torch"), or None when the line is not a backend guard.

    Fixes vs. the previous version: the findall result was bound to a
    throwaway local, `backends` was undefined, and the *input line* (rather
    than the backend list) was joined and returned.
    """
    if _re_test_backend.search(__magic_name__ ) is None:
        return None
    # _re_backend has a trailing empty group, so findall yields tuples;
    # element 0 is the backend name.
    backends = [b[0] for b in _re_backend.findall(__magic_name__ )]
    backends.sort()
    return "_and_".join(backends )
def lowercase ( __magic_name__ ):
    """Parse a transformers-style ``__init__.py`` and collect, per backend,
    the objects declared in ``_import_structure`` and in the TYPE_CHECKING
    section, returning the two dicts for comparison.

    NOTE(review): throughout this function the results of each expression are
    bound to the throwaway ``UpperCAmelCase`` while the following statements
    reference the originally intended names (``lines``, ``line_index``,
    ``objects``, ``line``, ``backend``, ...), which are undefined here —
    presumably damaged by a mechanical rename. The control flow below is the
    upstream `check_inits.parse_init` shape; restore the bindings before use.
    """
    with open(__magic_name__ , "r" , encoding="utf-8" , newline="\n" ) as f:
        UpperCAmelCase : str = f.readlines()
    # Scan forward to the start of the _import_structure literal.
    UpperCAmelCase : Optional[int] = 0
    while line_index < len(__magic_name__ ) and not lines[line_index].startswith("_import_structure = {" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(__magic_name__ ):
        return None
    # First grab the objects without a specific backend in _import_structure
    UpperCAmelCase : str = []
    while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
        UpperCAmelCase : List[str] = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(__magic_name__ ):
            UpperCAmelCase : int = _re_one_line_import_struct.search(__magic_name__ ).groups()[0]
            UpperCAmelCase : Any = re.findall("\[([^\]]+)\]" , __magic_name__ )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", " )] )
            line_index += 1
            continue
        UpperCAmelCase : Optional[int] = _re_import_struct_key_value.search(__magic_name__ )
        if single_line_import_search is not None:
            UpperCAmelCase : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(__magic_name__ ) > 0]
            objects.extend(__magic_name__ )
        elif line.startswith(" " * 8 + "\"" ):
            objects.append(line[9:-3] )
        line_index += 1
    UpperCAmelCase : Dict = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        UpperCAmelCase : str = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCAmelCase : Optional[Any] = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            UpperCAmelCase : List[Any] = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
                UpperCAmelCase : List[str] = lines[line_index]
                if _re_import_struct_add_one.search(__magic_name__ ) is not None:
                    objects.append(_re_import_struct_add_one.search(__magic_name__ ).groups()[0] )
                elif _re_import_struct_add_many.search(__magic_name__ ) is not None:
                    UpperCAmelCase : List[str] = _re_import_struct_add_many.search(__magic_name__ ).groups()[0].split(", " )
                    UpperCAmelCase : int = [obj[1:-1] for obj in imports if len(__magic_name__ ) > 0]
                    objects.extend(__magic_name__ )
                elif _re_between_brackets.search(__magic_name__ ) is not None:
                    UpperCAmelCase : Optional[Any] = _re_between_brackets.search(__magic_name__ ).groups()[0].split(", " )
                    UpperCAmelCase : Optional[int] = [obj[1:-1] for obj in imports if len(__magic_name__ ) > 0]
                    objects.extend(__magic_name__ )
                elif _re_quote_object.search(__magic_name__ ) is not None:
                    objects.append(_re_quote_object.search(__magic_name__ ).groups()[0] )
                elif line.startswith(" " * 8 + "\"" ):
                    objects.append(line[9:-3] )
                elif line.startswith(" " * 12 + "\"" ):
                    objects.append(line[13:-3] )
                line_index += 1
            UpperCAmelCase : Optional[int] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    UpperCAmelCase : List[str] = []
    while (
        line_index < len(__magic_name__ )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("else" )
    ):
        UpperCAmelCase : int = lines[line_index]
        UpperCAmelCase : Tuple = _re_import.search(__magic_name__ )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", " ) )
        elif line.startswith(" " * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    UpperCAmelCase : Optional[Any] = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(__magic_name__ ):
        # If the line is an if is_backend_available, we grab all objects associated.
        UpperCAmelCase : Optional[int] = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCAmelCase : List[Any] = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            UpperCAmelCase : List[str] = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
                UpperCAmelCase : str = lines[line_index]
                UpperCAmelCase : Tuple = _re_import.search(__magic_name__ )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            UpperCAmelCase : Dict = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def lowercase ( import_dict_objects , type_hint_objects ):
    """Compare the objects registered in ``_import_structure`` against those in
    the TYPE_CHECKING section and return a list of human-readable errors.

    Fixes vs. the previous version: both parameters shared the same name (a
    SyntaxError) and every intermediate result was bound to a throwaway local
    while the code referenced undefined names (`errors`, `duplicate_imports`, ...).
    """

    def find_duplicates(seq):
        # Objects listed more than once within the same backend bucket.
        return [k for k, v in collections.Counter(seq ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
        # Compare the de-duplicated contents of the two buckets.
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:" )
            for obj in type_hint_objects[key]:
                if obj not in import_dict_objects[key]:
                    errors.append(f"  {obj} in TYPE_HINT but not in _import_structure." )
            for obj in import_dict_objects[key]:
                if obj not in type_hint_objects[key]:
                    errors.append(f"  {obj} in _import_structure but not in TYPE_HINT." )
    return errors
def lowercase ( ):
    """Walk the source tree and validate every ``__init__.py`` with the
    parser/analyzer above, raising ValueError with all failures.

    NOTE(review): `os.walk(__magic_name__)` references an undefined name
    (presumably PATH_TO_TRANSFORMERS), `parse_init`/`analyze_results` are not
    defined under those names in this file (the helpers are all called
    ``lowercase``), and `objects`/`errors`/`failures` are unbound because the
    results are assigned to the throwaway ``UpperCAmelCase``. Restore the
    bindings before running.
    """
    UpperCAmelCase : int = []
    for root, _, files in os.walk(__magic_name__ ):
        if "__init__.py" in files:
            UpperCAmelCase : Dict = os.path.join(__magic_name__ , "__init__.py" )
            UpperCAmelCase : Optional[Any] = parse_init(__magic_name__ )
            if objects is not None:
                UpperCAmelCase : int = analyze_results(*__magic_name__ )
                if len(__magic_name__ ) > 0:
                    UpperCAmelCase : Union[str, Any] = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(__magic_name__ ) )
    if len(__magic_name__ ) > 0:
        raise ValueError("\n\n".join(__magic_name__ ) )
def lowercase ( ):
    """Collect the dotted names of all transformers submodules on disk.

    NOTE(review): `os.walk(__magic_name__)`, `submodules`, `short_path` and
    `submodule` are undefined here — their assignments were collapsed into the
    throwaway ``UpperCAmelCase``; restore the bindings before running.
    """
    UpperCAmelCase : Union[str, Any] = []
    for path, directories, files in os.walk(__magic_name__ ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_" ):
                directories.remove(__magic_name__ )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(__magic_name__ ) / folder).glob("*.py" ) ) ) == 0:
                continue
            UpperCAmelCase : Any = str((Path(__magic_name__ ) / folder).relative_to(__magic_name__ ) )
            UpperCAmelCase : Optional[Any] = short_path.replace(os.path.sep , "." )
            submodules.append(__magic_name__ )
        for fname in files:
            if fname == "__init__.py":
                continue
            UpperCAmelCase : List[str] = str((Path(__magic_name__ ) / fname).relative_to(__magic_name__ ) )
            UpperCAmelCase : str = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
            # Only top-level modules (no dots) are registered directly.
            if len(submodule.split("." ) ) == 1:
                submodules.append(__magic_name__ )
    return submodules
# Submodules intentionally excluded from the registration check below
# (NOTE(review): bound to `a`, but the checker references IGNORE_SUBMODULES).
a : str = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def lowercase ( ):
    """Verify every submodule on disk is registered in the transformers
    main ``_import_structure``; raise ValueError listing any that are not.

    NOTE(review): `os.path.join(__magic_name__, ...)` references an undefined
    name, `spec`/`transformers`/`module_not_registered`/`list_of_modules` are
    unbound (assigned to the throwaway ``UpperCAmelCase``), and
    `get_transformers_submodules` is not defined under that name in this file.
    Restore the bindings before running.
    """
    UpperCAmelCase : str = importlib.util.spec_from_file_location(
        "transformers" , os.path.join(__magic_name__ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    UpperCAmelCase : Optional[int] = spec.loader.load_module()
    UpperCAmelCase : Dict = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(__magic_name__ ) > 0:
        UpperCAmelCase : List[str] = "\n".join(F"- {module}" for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            F"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
    # NOTE(review): `check_all_inits` and `check_submodules` are not defined in
    # this file — the checker functions above are all named `lowercase` and
    # shadow one another. These calls raise NameError as written.
    check_all_inits()
    check_submodules()
check_submodules()
| 679 | 0 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# Usage string printed when the script is launched with the wrong argv count.
lowerCamelCase : int = "Usage of script: script_name <size_of_canvas:int>"
# Weighted population used elsewhere for seeding: ~9% alive (1) vs dead (0).
lowerCamelCase : Dict = [0] * 100 + [1] * 10
# NOTE(review): both constants above clobber the same name `lowerCamelCase`,
# and `choice` is never defined in this file — this shuffle raises NameError.
random.shuffle(choice)
def __lowerCAmelCase ( __snake_case ):
    """Return a __snake_case x __snake_case grid of dead (False) cells.

    Fix vs. the previous version: the comprehension was bound to a throwaway
    local while an undefined `canvas` was returned (NameError).
    """
    canvas = [[False for _ in range(__snake_case )] for _ in range(__snake_case )]
    return canvas
def __lowerCAmelCase ( __snake_case ):
    """Seed every cell of the canvas in place with a random alive/dead state.

    Fixes vs. the previous version: the random bit was bound to a throwaway
    local (the canvas was never modified), and the inner loop enumerated the
    whole canvas instead of the current row (wrong on non-square grids).
    """
    for i, row in enumerate(__snake_case ):
        for j, _ in enumerate(row ):
            __snake_case[i][j] = bool(random.getrandbits(1 ) )
def __lowerCAmelCase ( __snake_case ):
    """One Game-of-Life generation: build the next canvas from the current one.

    NOTE(review): the intermediate results are bound to the throwaway
    ``__lowerCAmelCase`` while the code references undefined names
    (`current_canvas`, `next_gen_canvas`, `return_canvas`), and
    ``__judge_point`` does not exist at module level (the rule function below
    is also named ``__lowerCAmelCase``). Restore the bindings/names before use.
    """
    UpperCamelCase = None  # (comment-only review aid removed — see NOTE above)
    __lowerCAmelCase = np.array(__snake_case )
    __lowerCAmelCase = np.array(create_canvas(current_canvas.shape[0] ) )
    # For every cell, apply the rules to its 3x3 numpy neighbourhood slice.
    for r, row in enumerate(__snake_case ):
        for c, pt in enumerate(__snake_case ):
            __lowerCAmelCase = __judge_point(
                __snake_case , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
    __lowerCAmelCase = next_gen_canvas
    del next_gen_canvas # cleaning memory as we move on.
    # Convert back from numpy to nested Python lists for the caller.
    __lowerCAmelCase = current_canvas.tolist()
    return return_canvas
def __lowerCAmelCase ( pt , neighbours ):
    """Apply Conway's Game of Life rules to a single cell.

    pt: current state of the focus cell (True = alive).
    neighbours: the 3x3 neighbourhood INCLUDING the focus cell itself.
    Returns the cell's state in the next generation.

    Fixes vs. the previous version: the two parameters shared one name (a
    SyntaxError) and `alive`/`dead`/`state` were never actually bound (their
    initialisations were assigned to a throwaway local).
    """
    alive = 0
    dead = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False  # underpopulation
        elif alive == 2 or alive == 3:
            state = True  # survival
        elif alive > 3:
            state = False  # overpopulation
    else:
        if alive == 3:
            state = True  # reproduction
    return state
if __name__ == "__main__":
    # NOTE(review): this driver references many names that no longer exist in
    # this file after the rename damage: `usage_doc`, `canvas_size`, `c`,
    # `create_canvas`, `seed`, `run` (the helpers above are all named
    # ``__lowerCAmelCase``), plus `fig`, `ax`, `cmap` whose assignments were
    # collapsed into ``lowerCamelCase``. It raises NameError as written.
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    lowerCamelCase : List[str] = int(sys.argv[1])
    # main working structure of this module.
    lowerCamelCase : List[str] = create_canvas(canvas_size)
    seed(c)
    lowerCamelCase : str = plt.subplots()
    fig.show()
    lowerCamelCase : Optional[int] = ListedColormap(['''w''', '''k'''])
    try:
        # Re-render each generation until interrupted from the keyboard.
        while True:
            lowerCamelCase : Optional[Any] = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 367 |
'''simple docstring'''
import os
def lowercase ( ):
    """Maximum top-to-bottom path sum for the triangle stored in triangle.txt
    (Project Euler 18/67).

    Reads ``triangle.txt`` located next to this script, then runs the classic
    top-down dynamic program: each cell accumulates the best path sum from the
    row above; the answer is the maximum of the last row.

    Fixes vs. the previous version: ``os.path.realpath`` was called on an
    undefined name instead of ``__file__``, and `triangle`, `a` and
    `numbers_from_line` were never bound (assigned to throwaway locals).
    """
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , "triangle.txt" )
    with open(triangle_path ) as in_file:
        triangle = in_file.readlines()
    a = []
    for line in triangle:
        numbers_from_line = [int(number ) for number in line.strip().split(" " )]
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            # Best predecessor is directly above or above-left; edges use 0.
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
if __name__ == "__main__":
    # Print the triangle path-sum answer. The solver above is named
    # `lowercase` in this file; the previous code called an undefined
    # `solution` (NameError).
    print(lowercase())
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure for the MobileViT model family: maps each submodule
# to the public names it defines. Previously every bucket was bound to the
# throwaway name `lowercase` and the final _LazyModule call referenced an
# undefined `_import_structure` (NameError at import time).
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

# Vision-dependent image processing components.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 568 |
'''simple docstring'''
def lowercase ( __magic_name__ ):
'''simple docstring'''
if n == 1 or not isinstance(__magic_name__ , __magic_name__ ):
return 0
elif n == 2:
return 1
else:
UpperCAmelCase : Optional[int] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def lowercase ( __magic_name__ ):
    """Return the index of the first Fibonacci number (F(2) == 1 indexing)
    whose decimal representation has at least ``__magic_name__`` digits.

    Fixes vs. the previous version: `digits`/`index` were unbound (assigned to
    a throwaway local) and an undefined `fibonacci` was called. The Fibonacci
    values are now tracked iteratively as a running pair, avoiding rebuilding
    the whole sequence on every loop iteration.
    """
    digits = 0
    index = 2
    # previous/current hold F(index - 1) and F(index); F(1) = F(2) = 1 here.
    previous, current = 1, 1
    while digits < __magic_name__:
        index += 1
        previous, current = current, previous + current
        digits = len(str(current ) )
    return index
def lowercase ( __magic_name__ = 1000 ):
    """Project Euler #25 entry point: index of the first Fibonacci number with
    ``__magic_name__`` (default 1000) digits.

    NOTE(review): `fibonacci_digits_index` is not defined in this file — the
    helpers above are all named ``lowercase`` and shadow one another, so this
    call raises NameError as written.
    """
    return fibonacci_digits_index(__magic_name__ )
if __name__ == "__main__":
    # NOTE(review): `solution` is likewise undefined here (the entry point is
    # named ``lowercase``).
    print(solution(int(str(input()).strip())))
| 679 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _snake_case:
    """Binary-tree node used by the coin-distribution solver below."""

    # Number of coins stored on this node (the solver reads node.data).
    data: int
    # Child subtrees (the solver reads node.left / node.right).
    # NOTE: `TreeNode` is presumably this class's intended name; thanks to
    # `from __future__ import annotations` the annotation is never evaluated.
    left: TreeNode | None = None
    right: TreeNode | None = None
# Per-subtree result record: `moves` is the move count so far, `excess` is the
# subtree's coins minus the coins it must keep (one per node).
# NOTE(review): bound to `lowerCAmelCase_`, but downstream code references the
# name `CoinsDistribResult` — restore the original binding before use.
lowerCAmelCase_ = namedtuple('''CoinsDistribResult''', '''moves excess''')
def _A ( UpperCAmelCase ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(UpperCAmelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(UpperCAmelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(UpperCAmelCase ) != count_coins(UpperCAmelCase ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
def get_distrib(UpperCAmelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 ,1 )
A__ = get_distrib(node.left )
A__ = get_distrib(node.right )
A__ = 1 - left_distrib_excess
A__ = 1 - right_distrib_excess
A__ = (
left_distrib_moves
+ right_distrib_moves
+ abs(UpperCAmelCase )
+ abs(UpperCAmelCase )
)
A__ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(UpperCAmelCase ,UpperCAmelCase )
return get_distrib(UpperCAmelCase )[0]
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 531 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Number of transformer blocks for each published RWKV checkpoint size.
# Fixed: both tables below were bound to the same throwaway name ``a`` (the
# second shadowing the first), while the converter reads
# ``NUM_HIDDEN_LAYERS_MAPPING`` and ``HIDEN_SIZE_MAPPING``.
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}
# Hidden dimension per checkpoint size ("HIDEN" spelling kept to match uses).
HIDEN_SIZE_MAPPING = {
    "169M": 7_68,
    "430M": 10_24,
    "1B5": 20_48,
    "3B": 25_60,
    "7B": 40_96,
    "14B": 51_20,
}
a = HIDEN_SIZE_MAPPING  # preserve the previous binding of ``a``
def lowercase ( __magic_name__ ):
    """Rename the keys of an original RWKV checkpoint state dict to the
    Transformers ``RwkvModel`` naming scheme, in place, and return it.

    Fixed: each popped tensor and each renamed key had been bound to the
    throwaway ``UpperCAmelCase`` while the loop read ``weight``/``name``;
    the original rebindings are restored.
    """
    state_dict = __magic_name__
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        # everything except the LM head lives under the "rwkv." prefix
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=False , __magic_name__=None ):
    """Download an RWKV checkpoint from the Hub, convert its state dict, and
    save the result as sharded Transformers weights (optionally pushing it).

    NOTE(review): this block is machine-mangled and cannot run as written —
    the signature repeats the parameter name ``__magic_name__`` (a
    SyntaxError) and the body reads names the renaming erased
    (``tokenizer_file``, ``checkpoint_file``, ``size``, ``possible_sizes``,
    ``tokenizer``, ``config``, ``state_dict``, ``shards``, ``index``,
    ``shard_files``, ``model_name``, ...) while binding results to throwaway
    ``UpperCAmelCase`` locals. The original appears to be
    ``convert_rmkv_checkpoint_to_hf_format(repo_id, checkpoint_file,
    output_dir, size=None, tokenizer_file=None, push_to_hub=False,
    model_name=None)`` — restore those bindings before running.
    """
    # 1. Build or download the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer." )
        UpperCAmelCase : List[str] = 5_0277
        UpperCAmelCase : str = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
    else:
        UpperCAmelCase : List[Any] = PreTrainedTokenizerFast(tokenizer_file=__magic_name__ )
        UpperCAmelCase : List[Any] = len(__magic_name__ )
    tokenizer.save_pretrained(__magic_name__ )
    # 2. Build the config
    UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                UpperCAmelCase : Union[str, Any] = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
    if size not in possible_sizes:
        raise ValueError(F"`size` should be one of {possible_sizes}, got {size}." )
    UpperCAmelCase : str = RwkvConfig(
        vocab_size=__magic_name__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(__magic_name__ )
    # 3. Download model file then convert state_dict
    UpperCAmelCase : Union[str, Any] = hf_hub_download(__magic_name__ , __magic_name__ )
    UpperCAmelCase : Optional[Any] = torch.load(__magic_name__ , map_location="cpu" )
    UpperCAmelCase : Union[str, Any] = convert_state_dict(__magic_name__ )
    # 4. Split in shards and save
    UpperCAmelCase , UpperCAmelCase : Any = shard_checkpoint(__magic_name__ )
    for shard_file, shard in shards.items():
        torch.save(__magic_name__ , os.path.join(__magic_name__ , __magic_name__ ) )
    if index is not None:
        UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ )
        # Save the index as well
        with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
            UpperCAmelCase : List[Any] = json.dumps(__magic_name__ , indent=2 , sort_keys=__magic_name__ ) + "\n"
            f.write(__magic_name__ )
    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." )
    UpperCAmelCase : Any = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        UpperCAmelCase : Dict = torch.load(os.path.join(__magic_name__ , __magic_name__ ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__magic_name__ , __magic_name__ ) )
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub." )
        UpperCAmelCase : int = AutoModelForCausalLM.from_pretrained(__magic_name__ )
        model.push_to_hub(__magic_name__ , max_shard_size="2GB" )
        tokenizer.push_to_hub(__magic_name__ )
if __name__ == "__main__":
    # Fixed: the parser and parsed args had been bound to the throwaway name
    # ``a`` while every statement below reads ``parser``/``args``, and the
    # final call targeted the no-longer-existing name
    # ``convert_rmkv_checkpoint_to_hf_format`` (the converter is bound to
    # ``lowercase`` in this file).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )
    args = parser.parse_args()
    lowercase(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 679 | 0 |
def _lowerCamelCase ( lowerCamelCase_ ):
    """Convert a decimal number (int, float, or numeric string) to a reduced
    fraction, returned as ``(numerator, denominator)``.

    Raises:
        ValueError: if the input cannot be parsed as a number.

    Fixed: every intermediate had been collapsed onto the throwaway name
    ``A`` while the code read the original identifiers (``decimal``,
    ``numerator``, ``dividend``, ...); the original bindings — including the
    Euclidean-GCD swap assignments — are restored.
    """
    try:
        decimal = float(lowerCamelCase_)
    except ValueError:
        raise ValueError('''Please enter a valid number''')
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        # Scale the fractional part up to an integer ratio, then reduce it
        # with the Euclidean algorithm.
        number_of_frac_digits = len(str(decimal).split('''.''')[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
    # Fixed: the converter is bound to ``_lowerCamelCase`` in this file, so
    # the demo calls to ``decimal_to_fraction`` raised NameError. Alias it so
    # the ``=``-style debug output below keeps the original call text.
    decimal_to_fraction = _lowerCamelCase
    print(F'''{decimal_to_fraction(2) = }''')
    print(F'''{decimal_to_fraction(89.0) = }''')
    print(F'''{decimal_to_fraction('67') = }''')
    print(F'''{decimal_to_fraction('45.0') = }''')
    print(F'''{decimal_to_fraction(1.5) = }''')
    print(F'''{decimal_to_fraction('6.25') = }''')
    print(F'''{decimal_to_fraction('78td') = }''')  # deliberately raises ValueError
'''simple docstring'''
def lowercase ( a , b ):
    """Return the bitwise AND of two non-negative integers as a "0b"-prefixed
    binary string, zero-padded to the width of the wider operand.

    Raises:
        ValueError: if either input is negative.

    Fixed: the signature repeated the same parameter name twice (a
    SyntaxError) and the binary strings / max length were bound to throwaway
    names while the code read ``a_binary``/``b_binary``/``max_len``.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
    # Run the module-level doctests when executed as a script.
    import doctest
    doctest.testmod()
| 679 | 0 |
import numpy
# List of input, output pairs
# Batch gradient descent for a linear hypothesis h(x) = t0 + t1*x1 + t2*x2 + t3*x3.
# Fixed: every module-level constant had been rebound to the single throwaway
# name ``_lowercase`` and every function to ``_SCREAMING_SNAKE_CASE`` (several
# with duplicated parameter names — a SyntaxError), while the bodies read the
# original identifiers; the original names are restored throughout.

# Each training example is ((x1, x2, x3), y).
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]  # [bias, w1, w2, w3]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Hypothesis output minus the actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis for one input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]  # bias term
    return hyp_val


def output(example_no, data_set):
    """Actual output value of the given example from the train or test set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value of the given example from the train or test set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of cost-derivative terms over the first ``end`` training examples.

    ``index == -1`` corresponds to the bias term (no feature multiplier).
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Mean cost derivative for parameter ``index`` (-1 = bias)."""
    return summation_of_cost_derivative(index, m) / m


def run_gradient_descent():
    """Iterate batch gradient descent until the parameter vector converges."""
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    """Print actual vs. predicted outputs for every test example."""
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("""\nTesting gradient descent for a linear hypothesis function.\n""")
    test_gradient_descent()
| 364 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Select which tensor framework the tokenizer tests use for return_tensors.
# Fixed: each branch bound the throwaway name ``a`` while the tests read
# ``FRAMEWORK`` (raising NameError); ``a`` is kept as an alias for
# backward compatibility.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
a = FRAMEWORK
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
    """Unit tests for ``PerceiverTokenizer`` (byte-level tokenizer with
    [CLS]/[SEP] framing): encoding round-trips, batching, target encoding,
    save/reload, and special-token handling.

    NOTE(review): this block is machine-mangled and cannot run as written:
    - the base class ``lowercase__`` is undefined (the file imports
      ``TokenizerTesterMixin``, which this presumably should be);
    - every method is named ``A_``, so each definition shadows the previous
      one and only the last survives on the class;
    - some signatures repeat the parameter name ``snake_case`` (a
      SyntaxError), and many results are bound to throwaway ``UpperCAmelCase``
      locals while later lines read the original names (``tokenizer``,
      ``encoded``, ``targets``, ...).
    Code below is kept byte-identical; only comments/docstrings were changed.
    """
    SCREAMING_SNAKE_CASE__ : int = PerceiverTokenizer
    SCREAMING_SNAKE_CASE__ : List[str] = False
    def A_ ( self ):
        '''setUp: build a PerceiverTokenizer and save it to the test temp dir.'''
        super().setUp()
        # NOTE(review): the tokenizer is bound to a throwaway local; the next
        # line reads an undefined name ``tokenizer``.
        UpperCAmelCase : List[str] = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def A_ ( self ):
        '''Reference tokenizer from the deepmind/language-perceiver checkpoint.'''
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
    def A_ ( self , **snake_case ):
        '''Reload the tokenizer saved in setUp, forwarding extra kwargs.'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
    def A_ ( self , snake_case , snake_case=False , snake_case=2_0 , snake_case=5 ):
        '''Produce a decodable (text, ids) pair bounded by min/max length.'''
        UpperCAmelCase : Optional[Any] = []
        for i in range(len(snake_case ) ):
            try:
                UpperCAmelCase : int = tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        UpperCAmelCase : Optional[int] = list(filter(lambda snake_case : re.match(r"^[ a-zA-Z]+$" , t[1] ) , snake_case ) )
        UpperCAmelCase : Any = list(filter(lambda snake_case : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=snake_case ) , snake_case ) )
        if max_length is not None and len(snake_case ) > max_length:
            UpperCAmelCase : Optional[Any] = toks[:max_length]
        if min_length is not None and len(snake_case ) < min_length and len(snake_case ) > 0:
            while len(snake_case ) < min_length:
                UpperCAmelCase : Any = toks + toks
        # toks_str = [t[1] for t in toks]
        UpperCAmelCase : Dict = [t[0] for t in toks]
        # Ensure consistency
        UpperCAmelCase : Any = tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case )
        if " " not in output_txt and len(snake_case ) > 1:
            UpperCAmelCase : Dict = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case )
            )
        if with_prefix_space:
            UpperCAmelCase : Union[str, Any] = " " + output_txt
        UpperCAmelCase : Dict = tokenizer.encode(snake_case , add_special_tokens=snake_case )
        return output_txt, output_ids
    def A_ ( self ):
        '''Round-trip text containing multi-byte UTF-8 characters.'''
        UpperCAmelCase : Union[str, Any] = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = "Unicode €."
        UpperCAmelCase : int = tokenizer(snake_case )
        UpperCAmelCase : Tuple = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Optional[Any] = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]Unicode €.[SEP]" )
        UpperCAmelCase : Tuple = tokenizer("e è é ê ë" )
        UpperCAmelCase : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Dict = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]e è é ê ë[SEP]" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
    def A_ ( self ):
        '''Batch-encode two strings; check returned type, ids and padded shapes.'''
        UpperCAmelCase : int = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        UpperCAmelCase : List[str] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
        # fmt: on
        UpperCAmelCase : Dict = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        self.assertIsInstance(snake_case , snake_case )
        if FRAMEWORK != "jax":
            UpperCAmelCase : List[Any] = list(batch.input_ids.numpy()[0] )
        else:
            UpperCAmelCase : str = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(snake_case , snake_case )
        self.assertEqual((2, 3_8) , batch.input_ids.shape )
        self.assertEqual((2, 3_8) , batch.attention_mask.shape )
    def A_ ( self ):
        '''Source-only encoding must not return decoder_* keys.'''
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        UpperCAmelCase : List[Any] = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , snake_case )
        self.assertIn("attention_mask" , snake_case )
        self.assertNotIn("decoder_input_ids" , snake_case )
        self.assertNotIn("decoder_attention_mask" , snake_case )
    def A_ ( self ):
        '''text_target encoding honors max_length padding/truncation.'''
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : int = [
            "Summary of the text.",
            "Another summary.",
        ]
        UpperCAmelCase : List[Any] = tokenizer(
            text_target=snake_case , max_length=3_2 , padding="max_length" , truncation=snake_case , return_tensors=snake_case )
        self.assertEqual(3_2 , targets["input_ids"].shape[1] )
    def A_ ( self ):
        '''Save/reload round-trips, including added tokens and model_max_length.'''
        UpperCAmelCase : Any = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length , 4_2 )
        # Now let's start the test
        UpperCAmelCase : Tuple = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : Dict = tempfile.mkdtemp()
                UpperCAmelCase : Any = " He is very happy, UNwant\u00E9d,running"
                UpperCAmelCase : int = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )
                UpperCAmelCase : List[str] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                shutil.rmtree(snake_case )
        UpperCAmelCase : Dict = self.get_tokenizers(model_max_length=4_2 )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : str = tempfile.mkdtemp()
                UpperCAmelCase : int = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"] )
                UpperCAmelCase : int = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token" )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                UpperCAmelCase : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )
                UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 4_2 )
                UpperCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(snake_case , model_max_length=4_3 )
                self.assertEqual(tokenizer.model_max_length , 4_3 )
                shutil.rmtree(snake_case )
    def A_ ( self ):
        '''Special-token configuration survives editing the saved JSON files.'''
        UpperCAmelCase : Dict = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(snake_case )
                with open(os.path.join(snake_case , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Union[str, Any] = json.load(snake_case )
                with open(os.path.join(snake_case , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Any = json.load(snake_case )
                UpperCAmelCase : str = [f"<extra_id_{i}>" for i in range(1_2_5 )]
                UpperCAmelCase : List[Any] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                UpperCAmelCase : List[str] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(snake_case , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )
                with open(os.path.join(snake_case , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                UpperCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(
                    snake_case , )
                self.assertIn(
                    "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                UpperCAmelCase : Optional[int] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case )]
                UpperCAmelCase : Optional[int] = tokenizer_class.from_pretrained(
                    snake_case , additional_special_tokens=snake_case , )
                self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
    def A_ ( self ):
        '''Byte id 178 decodes to the Unicode replacement character.'''
        UpperCAmelCase : int = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_7_8] ) , "�" )
    def A_ ( self ):
        '''Intentionally a no-op for this byte-level tokenizer.'''
        pass
    def A_ ( self ):
        '''Intentionally a no-op for this byte-level tokenizer.'''
        pass
    def A_ ( self ):
        '''Intentionally a no-op for this byte-level tokenizer.'''
        pass
    def A_ ( self ):
        '''Intentionally a no-op for this byte-level tokenizer.'''
        pass
    def A_ ( self ):
        '''convert_tokens_to_string joins raw token characters back into text.'''
        UpperCAmelCase : Dict = self.get_tokenizers(fast=snake_case , do_lower_case=snake_case )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                UpperCAmelCase : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                UpperCAmelCase : int = tokenizer.convert_tokens_to_string(snake_case )
                self.assertIsInstance(snake_case , snake_case )
| 679 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
# Emit INFO-level progress messages during conversion.
logging.set_verbosity_info()
# Module logger (bound to the throwaway name ``__a`` by the obfuscation).
__a : Any = logging.get_logger(__name__)
def __magic_name__ ( lowercase_ ) -> "DetaConfig":
    """Build the DetaConfig (Swin backbone + label maps) for a checkpoint name.

    NOTE(review): machine-mangled — every intermediate is bound to the
    throwaway ``UpperCamelCase``, so the SwinConfig never reaches DetaConfig
    (``backbone_config=lowercase_`` passes the model-name argument instead),
    and ``model_name``, ``num_labels``, ``idalabel`` and ``config`` are read
    without ever being bound. Original: ``get_deta_config(model_name)``;
    restore those bindings before running.
    """
    UpperCamelCase = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
    UpperCamelCase = DetaConfig(
        backbone_config=lowercase_ , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=lowercase_ , with_box_refine=lowercase_ , two_stage=lowercase_ , )
    # set labels
    UpperCamelCase = "huggingface/label-files"
    if "o365" in model_name:
        UpperCamelCase = 366
        UpperCamelCase = "object365-id2label.json"
    else:
        UpperCamelCase = 91
        UpperCamelCase = "coco-detection-id2label.json"
    UpperCamelCase = num_labels
    UpperCamelCase = json.load(open(cached_download(hf_hub_url(lowercase_ , lowercase_ , repo_type="dataset" ) ) , "r" ) )
    UpperCamelCase = {int(lowercase_ ): v for k, v in idalabel.items()}
    UpperCamelCase = idalabel
    UpperCamelCase = {v: k for k, v in idalabel.items()}
    return config
def __magic_name__ ( config ) -> list:
    """Build the ``(old_key, new_key)`` rename list mapping original DETA/Swin
    checkpoint parameter names onto the Transformers implementation.

    Fixed: the result list was bound to the throwaway name ``UpperCamelCase``
    while every ``append`` targeted ``rename_keys``, and the parameter had
    been renamed away from ``config``, which the body reads.
    """
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
    rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
    rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
        if i < 3:
            rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
            rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
    rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
    rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
    rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
    rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
    rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
    rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
    # transformer encoder
    for i in range(config.encoder_layers ):
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
    # transformer decoder
    for i in range(config.decoder_layers ):
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
    # fmt: on
    return rename_keys
def __magic_name__ ( dct , old , new ) -> None:
    """Rename key ``old`` to ``new`` in the state dict ``dct``, in place.

    Fixed: the signature repeated one parameter name three times (a
    SyntaxError) and the popped value was bound to a throwaway name instead
    of being stored back under the new key.
    """
    val = dct.pop(old)
    dct[new] = val
def __magic_name__(state_dict, backbone_config) -> None:
    """Split the fused Swin QKV projections of the original checkpoint into separate tensors.

    For every Swin block, pops the single `attn.qkv` weight/bias and writes the
    query, key and value slices (in that order) back into *state_dict*.
    Target key names follow the HF DETA Swin-backbone layout — verify against
    the model's state dict if the checkpoint layout ever changes.
    """
    # feature dim doubles at every stage, starting from embed_dim
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in the original implementation,
            # this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''')
            in_proj_bias = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
def __magic_name__(state_dict, config) -> None:
    """Split each decoder layer's fused self-attention `in_proj` into q/k/v projections.

    Pops `transformer.decoder.layers.{i}.self_attn.in_proj_{weight,bias}` and
    writes the query, key and value slices under the HF `q_proj`/`k_proj`/`v_proj`
    key names expected by the converted model.
    """
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
def __magic_name__() -> "Image.Image":
    """Download the standard COCO cats image used to smoke-test the converted model."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True hands PIL the raw (undecoded) HTTP response; the mangled
    # source passed an undefined name here instead of True.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __magic_name__(model_name, pytorch_dump_folder_path, push_to_hub) -> None:
    """Convert an original DETA checkpoint into a HF ``DetaForObjectDetection`` model.

    Downloads the requested checkpoint, renames/splits its state-dict keys into
    the HF layout, verifies logits and boxes on a reference image, and then
    optionally saves and/or pushes the converted model and image processor.

    Raises:
        ValueError: if *model_name* is not one of the two supported checkpoints.
    """
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f'''Model name {model_name} not supported''')

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # print the original state dict for debugging
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes so the remaining keys match the HF module hierarchy
    # NOTE(review): replacement targets reconstructed from the HF DETA layout — confirm against the model.
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1E-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1E-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f'''jozhang97/{model_name}''')
        processor.push_to_hub(f'''jozhang97/{model_name}''')
if __name__ == "__main__":
    # The mangled source bound the parser and the parsed args to the same name
    # (`__a`) while the add_argument/parse calls used `parser`/`args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-swin-large",
        choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    # The conversion entry point was renamed to `__magic_name__` in this file;
    # the original call target `convert_deta_checkpoint` is undefined here.
    __magic_name__(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 606 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL. The mangled source bound both this map
# and the logger above to the same name `a`, clobbering the logger.
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class UpperCamelCase__ ( PretrainedConfig ):
    """Configuration class for EfficientFormer models.

    The mangled source repeated a single parameter name for every argument
    (a SyntaxError) and dropped every ``self.x = x`` target; parameter names
    and defaults are restored from the standard EfficientFormer config.
    Inherits the common config machinery from ``PretrainedConfig``.
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 679 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __UpperCamelCase ( unittest.TestCase ):
    """Builds a tiny RobertaPreLayerNorm config plus random inputs for the Flax tests.

    Method names restored from the standard HF model-tester layout; the
    mangled source gave every method the same (colliding) name while the
    bodies still called ``self.prepare_config_and_inputs()``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return a small config and random (input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Package the prepared inputs into the dict shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Like `prepare_config_and_inputs`, but in decoder mode with encoder states."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __UpperCamelCase ( FlaxModelTesterMixin , unittest.TestCase ):
    """Runs the shared Flax model-test suite over every RobertaPreLayerNorm head.

    The mangled source bound both class attributes below to the same name,
    clobbering the first, even though `self.all_model_classes` is read in
    `test_model_from_pretrained`.
    """

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading every model class from the PyTorch checkpoint."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
    """Slow integration tests pinning Flax RobertaPreLayerNorm outputs to reference values.

    Fixes from the mangled source: `from_pt=_A` (undefined) -> True, and the
    nonexistent dtypes `jnp.intaa`/`np.floataa` -> `jnp.int32`/`np.float32`.
    """

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 259 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
    """Builds a tiny ResNet config plus random image inputs for the TF model tests."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # one stage per entry in `hidden_sizes`
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        """Return a small config plus random pixel values (and labels, if enabled)."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Package the prepared inputs into the dict shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCamelCase__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Standard model-tester suite for TF ResNet.

    The mangled source bound every class attribute to the same name and listed
    the same placeholder base class twice (a TypeError); the attribute names
    below are the ones the test bodies actually read (`self.all_model_classes`,
    `self.model_tester`, `self.config_tester`).
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # common-property checks are not applicable to ResNet configs
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def lowercase():
    """Open and return the local COCO cats test-fixture image.

    The mangled source dropped the assignment target and then
    returned an undefined name.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration test comparing TF ResNet classification logits to reference values.

    The test body reads `self.default_image_processor`, so that property name
    is restored (the mangled source renamed it to a colliding `A_`).
    """

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 679 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class SCREAMING_SNAKE_CASE__ (unittest.TestCase ):
    """Holds the parameters fed to the MobileNetV1 image-processing tests.

    The mangled source repeated one parameter name for every argument
    (a SyntaxError) and dropped every ``self.x = x`` target.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        # fall back to the defaults the processor itself would use
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a MobileNetVaImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ (ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Runs the shared image-processing test suite against MobileNetVaImageProcessor.

    Attribute and method names (`image_processing_class`, `image_processor_tester`,
    `image_processor_dict`, `setUp`) are restored to the names the test bodies
    actually read; the mangled source renamed them to colliding placeholders.
    """

    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 615 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=9_9 , snake_case=6_4 , snake_case=5 , snake_case=4 , snake_case=6_4 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
'''simple docstring'''
UpperCAmelCase : List[Any] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : int = seq_length
UpperCAmelCase : Dict = is_training
UpperCAmelCase : Optional[Any] = use_input_mask
UpperCAmelCase : Optional[Any] = use_token_type_ids
UpperCAmelCase : Optional[Any] = use_labels
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[str] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : int = hidden_dropout_prob
UpperCAmelCase : Tuple = attention_probs_dropout_prob
UpperCAmelCase : Any = max_position_embeddings
UpperCAmelCase : Tuple = type_vocab_size
UpperCAmelCase : Union[str, Any] = type_sequence_label_size
UpperCAmelCase : int = initializer_range
UpperCAmelCase : Dict = num_labels
UpperCAmelCase : Union[str, Any] = num_choices
UpperCAmelCase : List[Any] = scope
def A_ ( self ):
'''simple docstring'''
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Any = None
if self.use_input_mask:
UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : str = None
UpperCAmelCase : Dict = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self ):
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MPNetModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Dict = model(snake_case , snake_case )
UpperCAmelCase : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : int = MPNetForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Dict = model(
snake_case , attention_mask=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : Optional[int] = MPNetForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Optional[int] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.num_choices
UpperCAmelCase : Optional[int] = MPNetForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Tuple = model(
snake_case , attention_mask=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Tuple = MPNetForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : str = config_and_inputs
UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__(lowercase__, lowercase__, unittest.TestCase):
    """Test suite wiring the MPNet models into the shared model/pipeline harness.

    NOTE(review): the two ``lowercase__`` base classes look mangled —
    presumably ModelTesterMixin and PipelineTesterMixin; confirm against the
    file's imports before merging.
    """

    # Attribute names restored from the common test-mixin contract: the
    # originals were all mangled to one identifier, so only the last
    # assignment survived.
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        """Create the model tester and a config tester for MPNetConfig."""
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class UpperCamelCase__(unittest.TestCase):
    """Slow integration test: run the pretrained MPNet base model on a fixed
    input and compare a slice of the output against reference values.

    NOTE(review): this class shadows the test class of the same mangled name
    above — it should be given a distinct name (e.g.
    MPNetModelIntegrationTest) so both are collected by unittest.
    """

    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 679 | 0 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_lowerCAmelCase : str = logging.get_logger(__name__)
class __magic_name__(BeitImageProcessor):
    """Deprecated alias of BeitImageProcessor kept for backward compatibility.

    Emits a FutureWarning on construction and otherwise behaves exactly like
    BeitImageProcessor (base class restored from the import above; the
    original base name was mangled).
    """

    def __init__(self, *args, **kwargs) -> None:
        # The second argument of warnings.warn must be a warning category;
        # deprecation shims in this code base use FutureWarning.
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 242 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

# Module logger and the slow->fast tokenizer-class mapping; both names are
# restored to match their uses in the conversion function below (the
# originals were mangled to throwaway identifiers).
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoint(s) into fast ``tokenizer.json`` files.

    Args:
        tokenizer_name: name of a single tokenizer class (without the "Fast"
            suffix), or None to convert every class in TOKENIZER_CLASSES.
        checkpoint_name: a single checkpoint to convert, or None for all the
            canonical checkpoints of each tokenizer class.
        dump_path: output directory.
        force_download: re-download checkpoints even if cached.

    Raises:
        ValueError: if ``tokenizer_name`` is given but unknown.

    NOTE(review): the original definition had four parameters with the same
    mangled name (a SyntaxError); names and locals are restored from the
    references inside the body and the call at the bottom of the script.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            # legacy_format=False keeps only the fast tokenizer.json output;
            # the remaining slow-format files are removed below anyway.
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    # CLI entry point: restore the mangled `parser`/`args` names so the
    # add_argument calls and the final conversion call actually run.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 679 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # A bare module-level assignment has no effect — per the comment above,
    # the value must be written into the XLA allocator environment variable.
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the model input dict, deriving any masks that were not supplied.

    Masks are 1 wherever the ids differ from ``config.pad_token_id``.
    NOTE(review): the returned "decoder_attention_mask" entry is the *encoder*
    mask, and the computed decoder/head masks are unused — this mirrors the
    original reference exactly but looks suspicious; confirm upstream.
    The function name is restored from its caller in the model tester; the
    original definition repeated one mangled parameter name (a SyntaxError).
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    """Builds tiny random Blenderbot configs/inputs and checks that decoding
    with an incremental cache matches one-shot decoding.

    NOTE(review): class/method names restored from the callers in the test
    class below; the original ``__init__`` dropped every ``self.`` and the
    check methods repeated one mangled parameter name (a SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        """Return (config, inputs_dict) with random encoder/decoder ids."""
        # Keep ids >= 3 so pad/eos/bos never appear mid-sequence, then force
        # an EOS (id 2) in the last column.
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            # NOTE(review): this argument was mangled in the source; False
            # matches the sibling flax seq2seq testers — confirm.
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Step-by-step decoding with a cache must match one-shot decoding."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same cache-consistency check, with an explicit decoder mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # Pad the mask out to the cache length.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    """Standalone checks of the LM-head output shapes and shift_tokens_right.

    NOTE(review): renamed from a mangled identifier that collided with the
    model-test class below (so this class was never collected); any unique
    name works — this one follows the sibling flax seq2seq test files.
    """

    # Shared toy vocabulary size (referenced as self.vocab_size below).
    vocab_size = 99

    def _get_config_and_data(self):
        """Return (config, input_ids, batch_size) for a tiny Blenderbot."""
        # NOTE(review): dtype digits were mangled in the source ("intaa");
        # int64 matches the sibling test files — confirm.
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        # Shifting right consumes exactly one pad token.
        self.assertEqual(n_pad_after, n_pad_before - 1)
        # Every row now starts with the decoder start token (2).
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    """End-to-end flax Blenderbot tests: cache consistency, jitted
    encode/decode, and slow pretrained-checkpoint checks.

    NOTE(review): base classes restored from the mixins imported at the top
    of the file; class/attribute names restored from the mixin contract (the
    originals were mangled, and the jitted helpers repeated one parameter
    name — a SyntaxError).
    """

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        # from_pt=True: the 3B checkpoint is converted from the torch weights.
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = "Sam is a great name. It means \"sun\" in Gaelic."

        # NOTE(review): if generate() returns a generation-output object here
        # rather than raw ids, decode `generated_utterances.sequences` instead.
        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCamelCase__(PipelineTool):
    """Tool that answers a question about an image (VQA via ViLT).

    NOTE(review): the base class and attribute names were mangled to single
    identifiers; they are restored to the PipelineTool contract
    (default_checkpoint / description / name / pre_processor_class /
    model_class / inputs / outputs). The original __init__ and encode also
    repeated one parameter name — a SyntaxError.
    """

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # PIL is required to handle the image input.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        """Featurize the (image, question) pair for the VQA model."""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        """Run the model; returns logits over the answer vocabulary."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring logit to its answer string."""
        idx = outputs.argmax(-1).item()
        # `id2label` is the PretrainedConfig mapping (the original had a
        # mangled digit: `idalabel`).
        return self.model.config.id2label[idx]
| 679 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
lowercase : str = "src/transformers"
# Matches is_xxx_available()
lowercase : Union[str, Any] = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
lowercase : int = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase : Any = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
lowercase : Dict = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
lowercase : Any = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase : List[str] = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase : Union[str, Any] = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase : List[str] = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
lowercase : Any = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
lowercase : Union[str, Any] = re.compile(r"^\s*try:")
# Catches a line with else:
lowercase : Tuple = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a backend-guard line of an init.

    Returns None when the line is not an ``if not is_xxx_available()`` guard;
    otherwise the sorted backend names joined by "_and_" (e.g.
    "tf_and_torch"). Name restored from its callers in parse_init; the
    original bound the backend list to a mangled name and then sorted an
    undefined variable.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(fname):
    """Read an ``__init__.py`` and return its two halves as object listings.

    Returns a pair ``(import_dict_objects, type_hint_objects)`` — each a dict
    mapping a backend key ("none" for unguarded objects) to the list of object
    names — or None if the file is not a lazy init. Name and locals restored
    from the references in the original body (they were mangled to
    single throwaway identifiers, leaving every later use undefined).
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init: `_import_structure` vs TYPE_CHECKING.

    Args:
        import_dict_objects: mapping of backend name -> objects declared in
            `_import_structure` ("none" is the backend-free base).
        type_hint_objects: mapping of backend name -> objects declared in the
            TYPE_CHECKING section.

    Returns:
        A list of human-readable error strings; empty when both halves agree.
    """

    def find_duplicates(seq):
        # Objects registered more than once for the same backend.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    # Both halves must declare the same backends, in the same order.
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


# Backward-compatible alias for the obfuscated historical name.
snake_case__ = analyze_results
def check_all_inits():
    """Parse every `__init__.py` under PATH_TO_TRANSFORMERS and verify that its
    `_import_structure` and TYPE_CHECKING halves define the same objects.

    Raises:
        ValueError: aggregating one report per init whose two halves disagree.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            # `parse_init` returns None for inits with no `_import_structure`.
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


# Backward-compatible alias for the obfuscated historical name.
snake_case__ = check_all_inits
def get_transformers_submodules():
    """Return the list of transformers submodules found under PATH_TO_TRANSFORMERS.

    A submodule is either a package folder containing Python files, or a
    top-level single-file module (``foo.py`` directly under the root).
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            # NOTE(review): removing from `directories` while iterating it can
            # skip the next entry — preserved from the original behavior.
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep single-file modules living at the top level.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


# Backward-compatible alias for the obfuscated historical name.
snake_case__ = get_transformers_submodules
# Submodules that are intentionally not registered in the main init
# (read as `IGNORE_SUBMODULES` by `check_submodules` below).
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]

# Backward-compatible alias for the obfuscated historical name.
lowercase = IGNORE_SUBMODULES
def check_submodules():
    """Verify every transformers submodule is registered in `_import_structure`.

    Loads the repository's own `transformers/__init__.py` (not an installed
    copy) and checks each discovered submodule appears in its
    `_import_structure` keys, unless explicitly listed in IGNORE_SUBMODULES.

    Raises:
        ValueError: listing every unregistered submodule.
    """
    # Load the repo init directly so we inspect the checked-out sources.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    # NOTE(review): `load_module` is deprecated; kept for behavior parity.
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


# Backward-compatible alias for the obfuscated historical name.
snake_case__ = check_submodules
# Entry point: run both repository-consistency checks when this file is
# executed directly as a script.
if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 542 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a : Optional[int] = logging.get_logger(__name__)
def rename_key(key):
    """Rewrite PyTorch-style ``name.N`` segments as Flax-style ``name_N``.

    e.g. ``"encoder.layers.0.weight"`` -> ``"encoder.layers_0.weight"``.

    Args:
        key: a dotted PyTorch parameter name.

    Returns:
        The name with every ``word.<digits>`` segment joined by an underscore.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


# Backward-compatible alias for the obfuscated historical name.
lowercase = rename_key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key to its Flax equivalent and reshape its tensor.

    Args:
        pt_tuple_key: the PyTorch parameter name split into a tuple of segments.
        pt_tensor: the parameter tensor (anything with ``.ndim``/``.transpose``/``.T``).
        random_flax_state_dict: flattened reference Flax params used to decide
            between ambiguous renames (scale vs weight, embedding vs kernel).

    Returns:
        The ``(flax_tuple_key, tensor)`` pair, with conv kernels transposed to
        HWIO and linear kernels transposed.
    """
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    # Layer norms whose bias only exists as `scale` on the Flax side.
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: OIHW -> HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose to Flax's (in, out) layout
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


# Backward-compatible alias for the obfuscated historical name.
lowercase = rename_key_and_reshape_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a nested Flax parameter dict.

    Args:
        pt_state_dict: mapping of PyTorch parameter names to torch tensors.
        flax_model: Flax model providing ``init_weights`` for the reference shapes.
        init_key: PRNG seed used to initialize the reference parameters.

    Returns:
        An unflattened (nested) dict of ``jnp`` arrays in the Flax layout.

    Raises:
        ValueError: when a converted tensor's shape disagrees with the
            randomly-initialized Flax parameter of the same name.
    """
    # Step 1: Convert the torch tensors to numpy.
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)


# Backward-compatible alias for the obfuscated historical name.
lowercase = convert_pytorch_state_dict_to_flax
| 679 | 0 |
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide ``number_of_bytes`` into ``partitions`` contiguous 1-indexed ranges.

    Args:
        number_of_bytes: total number of bytes to split.
        partitions: number of ranges to produce (1 <= partitions <= number_of_bytes).

    Returns:
        A list of ``"start-end"`` strings; the last range absorbs any remainder.

    Raises:
        ValueError: if ``partitions`` is not positive or exceeds ``number_of_bytes``.
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        # The final partition takes whatever is left after integer division.
        end_bytes = number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


# Backward-compatible alias for the obfuscated historical name.
__lowerCAmelCase = allocation_num
# Run the module's doctests when this file is executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 367 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__(SchedulerCommonTest):
    """Tests for `EulerDiscreteScheduler`: config sweeps plus full denoising
    loops whose sums/means are pinned to regression values."""

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, with overrides from `kwargs`."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Regression values for the epsilon-prediction loop.
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Regression values for the v-prediction loop.
        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Move the timesteps to the target device before the loop.
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Must match the CPU loop above.
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Regression values for the Karras-sigma schedule.
        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 679 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    """Convert a GPTSAN TensorFlow checkpoint into a PyTorch state dict.

    Reads the TF checkpoint in ``args.tf_model_dir`` (validated via its
    ``parameters.json``), maps every variable to its PyTorch parameter name,
    and saves the resulting ``OrderedDict`` to ``args.output`` (a ``.pt``
    suffix is appended when missing).

    Raises:
        ValueError: if ``parameters.json`` is empty.
    """
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    with open(parameter_file) as f:
        params = json.loads(f.read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            # NOTE(review): dtype was obfuscated upstream (`np.floataa`);
            # half precision assumed — confirm against the released converter.
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                # Optimizer slot variables are not model weights.
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]  # "wo" or "wi"
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = vnp[i].transpose([1, 0]).copy()  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    # Token embeddings are shared with the LM head.
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


# Backward-compatible alias for the obfuscated historical name.
lowerCAmelCase__ = convert_tf_gptsan_to_pt
# Command-line entry point: parse the checkpoint/output paths and run the
# conversion. (Originally the parser/args were bound to an obfuscated name,
# leaving `parser.add_argument` and `args` undefined.)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
| 568 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCamelCase__(TestCase):
    """Code-quality checks over the dataset scripts under ``./datasets``:
    every non-binary ``open(...)`` must pass an encoding, and no bare
    ``print`` calls are allowed."""

    def _no_encoding_on_file_open(self, filepath: str):
        r"""Return the first ``open(...)`` call in `filepath` that passes no
        encoding and no binary/write mode, or None when the file is clean."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Return the first genuine ``print(...)`` call in `filepath`
        (comments and docstrings are ignored), or None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 679 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# Read as `_TO_SKIP` by the pipeline test class below.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}

# Backward-compatible alias for the obfuscated historical name.
lowerCAmelCase_ = _TO_SKIP
@is_pipeline_test
class _snake_case(unittest.TestCase):
    """Pipeline tests for `text-classification` (sequence classification)."""

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    # Filter out model types whose inputs differ from the usual text models.
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_inputs = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_inputs)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 531 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : str = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    """BertEncoder variant for PABEE (patience-based early exit) that can run a
    single encoder layer at a time, letting the caller decide when to stop."""

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """Run only ``self.layer[current_layer]`` on `hidden_states`.

        Args:
            hidden_states: input activations for the selected layer.
            current_layer: index of the encoder layer to execute.
            attention_mask: optional attention mask forwarded to the layer.
            head_mask: per-layer head masks; indexed by `current_layer`.

        Returns:
            The layer's output hidden states.
        """
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states


# Backward-compatible alias for the obfuscated historical name.
UpperCamelCase__ = BertEncoderWithPabee
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    """BertModel with Patience-based Early Exit (PABEE) inference.

    During training every layer's classifier output is returned; during
    inference, forwarding stops once `patience` consecutive internal
    classifiers agree on the prediction.
    """

    def __init__(self, config):
        super().__init__(config)

        # Replace the stock encoder with one that can run layer-by-layer.
        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()

        # Early-exit configuration and inference statistics.
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        # Threshold on |delta logits| under which regression outputs count as "unchanged".
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        """Print the average number of layers used per inference instance."""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        """Run BERT with per-layer classifiers `output_layers`.

        Returns a list of logits: one per layer in training mode, or a
        single element (the early-exited prediction) in inference mode.
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            # Training: collect every layer's classifier logits.
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            # Patience-based early exit.
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    # Regression: "unchanged" means the logit moved less than the threshold.
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    # Classification: "unchanged" means the argmax prediction is identical.
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ",
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    """PABEE BERT with one classification/regression head per layer."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One internal classifier per transformer layer (PABEE exits compare these).
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return `(loss?, last_logits)`; the loss weights deeper layers more
        heavily (layer i contributes with weight i + 1).
        """
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
| 679 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
UpperCamelCase_ = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCamelCase_ = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
UpperCamelCase_ = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... 
lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE(datasets.Metric):
    """ChrF / ChrF++ machine-translation metric, backed by sacrebleu's CHRF.

    NOTE(review): this relies on module-level `_CITATION`, `_DESCRIPTION` and
    `_KWARGS_DESCRIPTION` string constants being defined above — verify the
    constants at the top of this file carry those names.
    """

    def _info(self):
        # sacrebleu only ships the chrF/chrF++ implementation from 1.4.12 on.
        if version.parse(scb.__version__) < version.parse('1.4.12'):
            raise ImportWarning(
                'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
                }
            ),
            codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'],
            reference_urls=[
                'https://github.com/m-popovic/chrF',
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order=CHRF.CHAR_ORDER,
        word_order=CHRF.WORD_ORDER,
        beta=CHRF.BETA,
        lowercase=False,
        whitespace=False,
        eps_smoothing=False,
    ):
        """Compute the corpus-level chrF(++) score and its configuration."""
        # Every prediction must come with the same number of references.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        # sacrebleu expects the references transposed: one list per reference slot.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact Gaussian Error Linear Unit: x * Phi(x), computed with erf.

    https://arxiv.org/abs/1606.08415
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf
def _gelu_new(x):
    """Tanh approximation of GELU (the variant used in the original
    Google BERT repository)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.0_4_4_7_1_5, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf
def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)

    return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
    """Fast tanh-based GELU approximation (0.7978845608 = sqrt(2/pi))."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.0_4_4_7_1_5, x.dtype)
    coeff2 = tf.cast(0.7_9_7_8_8_4_5_6_0_8, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def quick_gelu(x):
    """Cheap sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.7_0_2, x.dtype)

    return x * tf.math.sigmoid(coeff * x)
def gelu_aa(x):
    """GELU clipped to [-10, 10] (the "gelu_10" entry of the activation map);
    clipping keeps quantization ranges bounded."""
    return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
    """Gated Linear Unit: split `x` in two halves along `axis` and gate the
    first half with the sigmoid of the second."""
    a, b = tf.split(x, 2, axis=axis)

    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        """Adapter so Keras' tanh-approximated GELU matches our 1-arg signature."""
        return tf.keras.activations.gelu(x, approximate=True)

    # TF >= 2.4 ships native GELU implementations; prefer them.
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
# String name -> activation callable, resolved by get_tf_activation().
ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def get_tf_activation(activation_string):
    """Look up an activation function by name in the ACTaFN map.

    Raises KeyError for unknown names, listing the valid options.
    """
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys())}")
| 679 | 0 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint into a saved PyTorch model.

    tf_checkpoint_path: path to the TF checkpoint to read.
    config_file: JSON config describing the model architecture.
    pytorch_dump_path: directory where the PyTorch model is saved.
    """
    # Initialise the PyTorch model from the JSON config.
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 364 |
'''simple docstring'''
from __future__ import annotations
class UpperCamelCase__:
    """N-th order IIR (infinite impulse response) digital filter in direct
    form I:

        a0*y[n] = b0*x[n] + ... + bk*x[n-k] - a1*y[n-1] - ... - ak*y[n-k]

    With the default coefficients (a = b = [1, 0, ..., 0]) the filter is the
    identity.
    """

    def __init__(self, order):
        # Filter order k.
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs, b_coeffs):
        """Set the filter coefficients; each list must hold order + 1 values.

        A missing leading a_0 is assumed to be 1.0. Raises ValueError when a
        list ends up with the wrong length.
        """
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample):
        """Feed one input sample through the filter and return the output."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # Shift the delay lines and store the newest values at index 0.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result

        return result
| 679 | 0 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __UpperCAmelCase(unittest.TestCase):
    """Launches accelerate's bundled smoke-test script on 8 TPU cores via
    torch-xla's xla_spawn launcher."""

    def setUp(self):
        # Locate accelerate's bundled test script relative to the installed package.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f'''\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '''.split()
        cmd = [sys.executable] + distributed_args
        # Run the spawned test script as a subprocess with the current env.
        execute_subprocess_async(cmd, env=os.environ.copy())
| 606 |
'''simple docstring'''
import argparse
from collections import defaultdict
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = F"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Tuple = f.readlines()
UpperCAmelCase : Tuple = F"class {class_name}("
UpperCAmelCase : str = F"{4 * ' '}def {test_name}("
UpperCAmelCase : Dict = F"{8 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Tuple = F"{16 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : List[str] = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Tuple = 0
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = []
for line in lines:
if line.startswith(__magic_name__ ):
UpperCAmelCase : int = True
elif in_class and line.startswith(__magic_name__ ):
UpperCAmelCase : Dict = True
elif in_class and in_func and (line.startswith(__magic_name__ ) or line.startswith(__magic_name__ )):
UpperCAmelCase : List[str] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"{spaces * ' '}{correct_line}" )
UpperCAmelCase : List[str] = False
else:
new_lines.append(__magic_name__ )
with open(__magic_name__ , "w" ) as f:
for line in new_lines:
f.write(__magic_name__ )
def lowercase ( __magic_name__ , __magic_name__=None ):
'''simple docstring'''
if fail is not None:
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Optional[int] = {l.strip() for l in f.readlines()}
else:
UpperCAmelCase : Any = None
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Tuple = f.readlines()
UpperCAmelCase : int = defaultdict(__magic_name__ )
for line in correct_lines:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = line.split(";" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
a : List[Any] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 679 | 0 |
"""simple docstring"""
class RadixNode:
    """A node of a radix (compressed prefix) tree."""

    def __init__(self, prefix="", is_leaf=False):
        # Mapping from the first letter of an edge to the child node.
        self.nodes = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word):
        """Compute the common substring of the node prefix and `word`.

        Returns (common substring, remaining prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words):
        """Insert many words into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word):
        """Insert `word` into the tree rooted at this node."""
        # Case 1: the word equals the prefix: mark this node as a leaf.
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word):
        """Return True iff `word` is stored in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word):
        """Delete `word` from the tree; return True iff it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height=0):
        """Print the tree, one node per line, indented by depth."""
        if self.prefix != "":
            print('-' * height, self.prefix, ' (leaf)' if self.is_leaf else '')
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie():
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find('bandanas')
    assert not root.find('apps')
    root.delete('all')
    assert not root.find('all')
    root.delete('banana')
    assert not root.find('banana')
    assert root.find('bananas')

    return True


def pytests():
    assert test_trie()


def main():
    """Demo: build a small tree and print it."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print('Words:', words)
    print('Tree:')
    root.print_tree()


if __name__ == "__main__":
    main()
| 259 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    """Binary-tree node whose `data` is the number of coins it holds."""

    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def lowercase(root):
    """Return the minimum number of moves needed so every node of the tree
    holds exactly one coin, where a move transfers one coin between
    adjacent nodes (LeetCode 979, "Distribute Coins in Binary Tree").

    Raises ValueError when the number of coins differs from the number of
    nodes (no valid distribution exists then).

    >>> lowercase(TreeNode(3, TreeNode(0), TreeNode(0)))
    2
    >>> lowercase(None)
    0
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node) -> int:
        if node is None:
            return 0

        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node) -> int:
        if node is None:
            return 0

        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node) -> CoinsDistribResult:
        """Return (moves within this subtree, coins flowing out of it - used)."""
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 679 | 0 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return a representative Linear layer from `model`'s first MLP block
    (used by the quantization tests to inspect weight dtype/class)."""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    # Bloom-style models name the second MLP projection dense_4h_to_h.
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class SCREAMING_SNAKE_CASE__(nn.Module):
    """Wraps a frozen linear `module` with a trainable low-rank (LoRA-style)
    adapter. The adapter's output projection is zero-initialised so the
    wrapper behaves exactly like `module` before any training step.
    """

    def __init__(self, module, rank):
        super().__init__()
        self.module = module
        # Low-rank bottleneck: in_features -> rank -> out_features, no biases.
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        # Zero-init the output projection so the adapter starts as a no-op.
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Base fixture for the 4-bit quantization tests: model id, reference
    values and a shared tokenizer."""

    model_name = "bigscience/bloom-1b7"

    # Constant values
    # Expected fp16-vs-4bit memory-footprint ratio for this model.
    EXPECTED_RELATIVE_DIFFERENCE = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4

    input_text = "Hello my name is"
    # Any of these greedy continuations counts as a correct generation.
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class SCREAMING_SNAKE_CASE__ (lowercase__ ):
    """End-to-end checks for a causal LM loaded in 4-bit precision.

    NOTE(review): local/attribute names and the distinct unittest method names
    were restored from the reads inside this class — the obfuscated original
    reused one name (`lowerCAmelCase__` / `A__`) for every binding and method,
    so only the last method survived and every test raised NameError.
    """

    def setUp( self ):
        super().setUp()
        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map='auto' )
        # NOTE(review): `load_in_abit=True` reconstructed — the obfuscated code
        # passed an undefined name here.
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )

    def tearDown( self ):
        """Free GPU memory between tests."""
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization( self ):
        """The quantization config must be attached to the model config and serializable."""
        config = self.model_abit.config
        self.assertTrue(hasattr(config , 'quantization_config' ) )
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint( self ):
        """The 4-bit model must be smaller than the fp16 one by the expected ratio."""
        from bitsandbytes.nn import Paramsabit

        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        linear = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )

    def test_linear_are_abit( self ):
        """Every non-excluded linear layer must hold packed uint8 parameters."""
        from transformers import TaPreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(module , torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )

    def test_generate_quality( self ):
        """Greedy generation from the 4-bit model must match one of the references."""
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        output_sequences = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )

    def test_generate_quality_config( self ):
        """Same generation check, loading the model through an explicit BitsAndBytesConfig."""
        config = BitsAndBytesConfig()
        config.load_in_abit = True  # NOTE(review): attribute name reconstructed — confirm
        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=config , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )

    def test_raise_on_save_pretrained( self ):
        """Serializing a 4-bit model is unsupported and must raise.

        NOTE(review): the exact exception type was lost in obfuscation —
        confirm against the transformers test-suite.
        """
        with self.assertRaises(NotImplementedError ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname )

    def test_raise_on_config_and_load_in_abit( self ):
        """Passing both a quantization_config and load_in_abit must raise."""
        config = BitsAndBytesConfig()
        with self.assertRaises(ValueError ):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=config , load_in_abit=True , device_map='auto' , bnb_abit_quant_type='nf4' , )

    def test_device_and_dtype_assignment( self ):
        """Casting / moving a quantized model is forbidden; the fp16 model stays usable."""
        with self.assertRaises(ValueError ):
            # Tries with `str`
            self.model_abit.to('cpu' )
        with self.assertRaises(ValueError ):
            # Tries with a `dtype`
            self.model_abit.to(torch.floataa )
        with self.assertRaises(ValueError ):
            # Tries with a `device`
            self.model_abit.to(torch.device('cuda:0' ) )
        with self.assertRaises(ValueError ):
            self.model_abit.float()
        with self.assertRaises(ValueError ):
            self.model_abit.half()
        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        self.model_fpaa = self.model_fpaa.to(torch.floataa )
        _ = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        # Check this does not throw an error
        _ = self.model_fpaa.to('cpu' )
        # Check this does not throw an error
        _ = self.model_fpaa.half()
        # Check this does not throw an error
        _ = self.model_fpaa.float()

    def test_fpaa_modules_kept( self ):
        """T5-style models keep some modules in fp32 even when loaded in 4-bit."""
        model = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=True , device_map='auto' )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ (unittest.TestCase ):
    """4-bit loading checks for T5 (encoder-decoder) models.

    NOTE(review): class-attribute, local and method names restored from the
    reads inside this class (obfuscation had collapsed them all).
    """

    @classmethod
    def setUpClass( cls ):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name )
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown( self ):
        """Free GPU memory between tests."""
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fpaa( self ):
        """Loading must still work when the fp32-keep list is disabled."""
        from transformers import TaForConditionalGeneration

        modules = TaForConditionalGeneration._keep_in_fpaa_modules
        TaForConditionalGeneration._keep_in_fpaa_modules = None
        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
        # restore the class attribute patched above
        TaForConditionalGeneration._keep_in_fpaa_modules = modules

    def test_inference_with_keep_in_fpaa( self ):
        """Default loading path: decoder linears are quantized and generation runs."""
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=True , device_map='auto' )
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        _ = model.generate(**encoded_input )
class SCREAMING_SNAKE_CASE__ (lowercase__ ):
    """4-bit loading across different auto-model heads.

    NOTE(review): the `self.*` attribute names are restored from the `del`
    statements in tearDown (obfuscation had replaced the assignments with
    dead locals).
    """

    def setUp( self ):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=True , device_map='auto' )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True , device_map='auto' )
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=True , device_map='auto' )

    def tearDown( self ):
        """Free GPU memory between tests."""
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class( self ):
        """Quantization must touch transformer weights only, never the task heads."""
        from bitsandbytes.nn import Paramsabit

        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class SCREAMING_SNAKE_CASE__ (lowercase__ ):
    """4-bit loading through the `text-generation` pipeline.

    NOTE(review): `self.pipe` / `pipeline_output` names restored from the
    reads in tearDown and the assertion below.
    """

    def setUp( self ):
        super().setUp()

    def tearDown( self ):
        """Free GPU memory between tests."""
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline( self ):
        self.pipe = pipeline(
            'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class SCREAMING_SNAKE_CASE__ (lowercase__ ):
    """4-bit loading sharded ("balanced") across multiple GPUs."""

    def setUp( self ):
        super().setUp()

    def test_multi_gpu_loading( self ):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=True , device_map='balanced' )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text , return_tensors='pt' )
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=True ) , self.EXPECTED_OUTPUTS )
class SCREAMING_SNAKE_CASE__ (lowercase__ ):
    """Adapter (LoRA) training on top of a frozen 4-bit base model."""

    def setUp( self ):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training( self ):
        # bitsandbytes < 0.37.0 cannot backprop through 4-bit layers
        if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
            return
        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module ) ):
                module.q_proj = LoRALayer(module.q_proj , rank=16 )
                module.k_proj = LoRALayer(module.k_proj , rank=16 )
                module.v_proj = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        batch = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module , LoRALayer ):
                # adapter weights must have received gradients
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(module , nn.Embedding ):
                # frozen embeddings must not
                self.assertTrue(module.weight.grad is None )
class SCREAMING_SNAKE_CASE__ (lowercase__ ):
lowercase_ : List[str] = "gpt2-xl"
lowercase_ : Optional[Any] = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 615 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

# NOTE(review): the obfuscated original bound all four module constants to the
# same name `a`; the names below are restored from their reads inside the
# tokenizer class (`VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`,
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`, and `logger`).
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

# Maximum model input length (in positions) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class UpperCamelCase__ ( lowercase__ ):
    """Fast (tokenizers-backed) LED tokenizer.

    NOTE(review): parameter, method and local names were restored from the
    internal reads and `super()` calls — the obfuscated original reused
    `snake_case` for every parameter (duplicate parameter names are a
    SyntaxError) and `A_` for every method. The `@mask_token.setter` line
    fixes the property name; the `super()._batch_encode_plus` /
    `_encode_plus` / `_pad` calls fix the private method names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self ,
        vocab_file=None ,
        merges_file=None ,
        tokenizer_file=None ,
        errors="replace" ,
        bos_token="<s>" ,
        eos_token="</s>" ,
        sep_token="</s>" ,
        cls_token="<s>" ,
        unk_token="<unk>" ,
        pad_token="<pad>" ,
        mask_token="<mask>" ,
        add_prefix_space=False ,
        trim_offsets=True ,
        **kwargs ,
    ):
        super().__init__(
            vocab_file ,
            merges_file ,
            tokenizer_file=tokenizer_file ,
            errors=errors ,
            bos_token=bos_token ,
            eos_token=eos_token ,
            sep_token=sep_token ,
            cls_token=cls_token ,
            unk_token=unk_token ,
            pad_token=pad_token ,
            mask_token=mask_token ,
            add_prefix_space=add_prefix_space ,
            trim_offsets=trim_offsets ,
            **kwargs ,
        )
        # Rebuild the backend pre-tokenizer if its add_prefix_space differs.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self ):
        """The mask token as a string; logs an error and returns None if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ):
        # The mask token behaves like a normal word: it absorbs the preceding space.
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value

    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary( self , save_directory , filename_prefix=None ):
        """Save the backend model files; returns the written file names as a tuple."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """Wrap one or two sequences with bos/eos tokens, LED style."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        """LED does not use token type ids; return zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def _pad( self , encoded_inputs , max_length=None , padding_strategy=PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of=None , return_attention_mask=None , ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 679 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCamelCase_( key , default=False ):
    """Read a boolean flag from environment variable `key`.

    Returns `default` unchanged when the variable is unset; otherwise maps
    truthy strings (y/yes/t/true/on/1, case-insensitive) to 1 and falsy ones
    (n/no/f/false/off/0) to 0 — mirroring `distutils.util.strtobool`, inlined
    here because distutils was removed in Python 3.12.

    Raises ValueError for any other value. NOTE(review): the obfuscated
    original declared both parameters as `_snake_case` (a SyntaxError).
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        return default
    # KEY is set, convert it to True or False.
    lowered = value.lower()
    if lowered in ("y", "yes", "t", "true", "on", "1"):
        return 1
    if lowered in ("n", "no", "f", "false", "off", "0"):
        return 0
    # More values are supported, but let's keep the message simple.
    raise ValueError(F'If set, {key} must be yes or no.' )


# Backward-compatible alias: the module-level flag definitions below call
# `parse_flag_from_env(...)` by its original name.
parse_flag_from_env = UpperCamelCase_
# NOTE(review): the obfuscated original bound all of these to one name
# (`_lowerCAmelCase`). The `_run_*` flag names are restored from their reads
# in the slow/remote/local/packaged decorators below; the pytest-mark names
# are reconstructed (no in-file readers visible) — confirm against callers.
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lza = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_pyazr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless `faiss` is importable."""
    try:
        import faiss  # noqa
    except ImportError:
        # Rebind the parameter: the obfuscated original assigned to a dead
        # local and returned an undefined `test_case`.
        _snake_case = unittest.skip('test requires faiss' )(_snake_case )
    return _snake_case
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless `regex` is importable."""
    try:
        import regex  # noqa
    except ImportError:
        # Rebind the parameter (original returned an undefined `test_case`).
        _snake_case = unittest.skip('test requires regex' )(_snake_case )
    return _snake_case
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless `elasticsearch` is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        # Rebind the parameter (original returned an undefined `test_case`).
        _snake_case = unittest.skip('test requires elasticsearch' )(_snake_case )
    return _snake_case
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless `sqlalchemy` is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        # Rebind the parameter (original returned an undefined `test_case`).
        _snake_case = unittest.skip('test requires sqlalchemy' )(_snake_case )
    return _snake_case
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless PyTorch is available (per `datasets.config`)."""
    if not config.TORCH_AVAILABLE:
        # Rebind the parameter (original returned an undefined `test_case`).
        _snake_case = unittest.skip('test requires PyTorch' )(_snake_case )
    return _snake_case
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless TensorFlow is available (per `datasets.config`)."""
    if not config.TF_AVAILABLE:
        # Rebind the parameter (original returned an undefined `test_case`).
        _snake_case = unittest.skip('test requires TensorFlow' )(_snake_case )
    return _snake_case
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless JAX is available (per `datasets.config`)."""
    if not config.JAX_AVAILABLE:
        # Rebind the parameter (original returned an undefined `test_case`).
        _snake_case = unittest.skip('test requires JAX' )(_snake_case )
    return _snake_case
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless Pillow is available (per `datasets.config`)."""
    if not config.PIL_AVAILABLE:
        # Rebind the parameter (original returned an undefined `test_case`).
        _snake_case = unittest.skip('test requires Pillow' )(_snake_case )
    return _snake_case
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless `transformers` is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers' )(_snake_case )
    else:
        # Original returned an undefined `test_case`; return the parameter.
        return _snake_case
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless `tiktoken` is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken' )(_snake_case )
    else:
        # Original returned an undefined `test_case`; return the parameter.
        return _snake_case
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless `spacy` is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy' )(_snake_case )
    else:
        # Original returned an undefined `test_case`; return the parameter.
        return _snake_case
def UpperCamelCase_( model ):
    """Decorator factory: skip the wrapped test unless `spacy` and the given
    spacy `model` can be loaded.

    NOTE(review): the obfuscated original shadowed the model name with the
    inner test-case parameter, so it tried to `spacy.load` the test function.
    """
    def _require_spacy_model(test_case ):
        try:
            import spacy  # noqa F401

            spacy.load(model )
        except ImportError:
            return unittest.skip('test requires spacy' )(test_case )
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(model ) )(test_case )
        else:
            return test_case

    return _require_spacy_model
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless `pyspark` is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark' )(_snake_case )
    else:
        # Original returned an undefined `test_case`; return the parameter.
        return _snake_case
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless `joblibspark` is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark' )(_snake_case )
    else:
        # Original returned an undefined `test_case`; return the parameter.
        return _snake_case
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless RUN_SLOW is enabled (see `_run_slow_tests`)."""
    if not _run_slow_tests or _run_slow_tests == 0:
        # Rebind the parameter (original returned an undefined `test_case`).
        _snake_case = unittest.skip('test is slow' )(_snake_case )
    return _snake_case
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless RUN_LOCAL is enabled (see `_run_local_tests`)."""
    if not _run_local_tests or _run_local_tests == 0:
        # Rebind the parameter (original returned an undefined `test_case`).
        _snake_case = unittest.skip('test is local' )(_snake_case )
    return _snake_case
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless RUN_PACKAGED is enabled (see `_run_packaged_tests`)."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        # Rebind the parameter (original returned an undefined `test_case`).
        _snake_case = unittest.skip('test is packaged' )(_snake_case )
    return _snake_case
def UpperCamelCase_( _snake_case ):
    """Decorator: skip the wrapped test unless RUN_REMOTE is enabled (see `_run_remote_tests`)."""
    if not _run_remote_tests or _run_remote_tests == 0:
        # Rebind the parameter (original returned an undefined `test_case`).
        _snake_case = unittest.skip('test requires remote' )(_snake_case )
    return _snake_case
def UpperCamelCase_( *_snake_case : Optional[Any] ):
"""simple docstring"""
def decorate(cls : Any ):
for name, fn in cls.__dict__.items():
if callable(_snake_case ) and name.startswith('test' ):
for decorator in decorators:
__a =decorator(_snake_case )
setattr(cls , _snake_case , _snake_case )
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception ):
    """Raised by the offline simulator when a request has no timeout set.

    NOTE(review): name restored from the literal `raise
    RequestWouldHangIndefinitelyError(...)` further down this module; base
    class assumed to be Exception since it is raised with a message.
    """
    pass
class OfflineSimulationMode(Enum ):
    """How the `offline()` context manager below simulates a lost connection.

    NOTE(review): class and member names restored from their literal uses in
    the offline context manager (`OfflineSimulationMode.CONNECTION_FAILS`
    etc.); `Enum` base taken from this module's `from enum import Enum`.
    """
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def UpperCamelCase_( mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1e-16 ):
    """Simulate an offline environment for the duration of the `with` block.

    Depending on `mode`, either make `requests` connections fail immediately,
    make them time out after `timeout` seconds, or flip the
    `datasets.config.HF_DATASETS_OFFLINE` flag.

    NOTE(review): parameter and local names restored — the obfuscated
    original declared duplicate `_snake_case` parameters (a SyntaxError) and
    assigned the patched values to dead locals.
    """
    online_request = requests.Session().request

    def timeout_request(session , method , url , **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get('timeout' ) is None:
            raise RequestWouldHangIndefinitelyError(
                F'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
        kwargs['timeout'] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1' , F'OfflineMock[{url}]' ),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session , prepared_request , **kwargs ):
        raise requests.ConnectionError('Offline mode is enabled.' , request=prepared_request )

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send' , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request' , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE' , True ):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
@contextmanager
def UpperCamelCase_( *args , **kwargs ):
    """Create a temporary directory, chdir into it for the `with` block, then
    restore the original working directory (the temp dir is removed on exit).

    Extra arguments are forwarded to `tempfile.TemporaryDirectory`.
    NOTE(review): the obfuscated original declared `*_snake_case, **_snake_case`
    (duplicate parameter names, a SyntaxError) and chdir'ed to ambiguous names.
    """
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def UpperCamelCase_( ):
    """Assert that Arrow's allocated memory grows inside the `with` block.

    NOTE(review): the obfuscated original read an undefined
    `previous_allocated_memory`; the baseline is now actually captured.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCamelCase_( ):
    """Assert that Arrow's allocated memory does NOT grow inside the `with` block.

    NOTE(review): the obfuscated original read an undefined
    `previous_allocated_memory`; the baseline is now actually captured.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCamelCase_( rng1 , rng2 ):
    """Return True when two numpy random generators would produce identical draws.

    Both generators are deep-copied first so the comparison does not advance
    their state. NOTE(review): the obfuscated original declared both
    parameters as `_snake_case` (a SyntaxError).
    """
    return deepcopy(rng1 ).integers(0 , 100 , 10 ).tolist() == deepcopy(rng2 ).integers(0 , 100 , 10 ).tolist()
def UpperCamelCase_( func ):
    """Decorator: turn HTTP 500/502 failures of `func` into pytest xfails;
    any other exception is re-raised unchanged.

    NOTE(review): the obfuscated original reused `_snake_case` for the
    wrapper's parameters (a SyntaxError); names restored.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(check_func , *args , **kwargs ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith('500' ) or str(err ).startswith('502' ):
                pytest.xfail(str(err ) )
            raise err

    return decorator.decorator(_wrapper , func )
class __magic_name__ :
def __init__( self , __snake_case , __snake_case , __snake_case ) -> Dict:
'''simple docstring'''
__a =returncode
__a =stdout
__a =stderr
async def UpperCamelCase_( stream , callback ):
    """Forward every line from async `stream` to `callback` until EOF
    (an empty read).

    NOTE(review): the obfuscated original declared both parameters as
    `_snake_case` (a SyntaxError).
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def UpperCamelCase_( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    """Run `cmd`, streaming stdout/stderr line-by-line while also capturing them.

    Returns the `__magic_name__` result holder defined above (returncode,
    stdout lines, stderr lines). NOTE(review): parameter/local names restored
    — the obfuscated original declared duplicate parameters (a SyntaxError).
    """
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    async def _drain(stream , callback ):
        # local line reader (keeps this function independent of the sibling
        # helper whose name was lost in obfuscation)
        while True:
            line = await stream.readline()
            if not line:
                break
            callback(line )

    def tee(line , sink , pipe , label="" ):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # Wrap the readers in tasks: passing bare coroutines to asyncio.wait is an
    # error on Python 3.11+. The timeout doesn't seem to make any difference here.
    await asyncio.wait(
        [
            asyncio.create_task(_drain(p.stdout , lambda l: tee(l , out , sys.stdout , label='stdout:' ) ) ),
            asyncio.create_task(_drain(p.stderr , lambda l: tee(l , err , sys.stderr , label='stderr:' ) ) ),
        ] , timeout=timeout , )
    return __magic_name__(await p.wait() , out , err )
def UpperCamelCase_( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    """Run `cmd` through the async subprocess streamer and raise on failure
    or on a completely silent run.

    NOTE(review): `_stream_subprocess` is the async helper defined just above
    (its name was obfuscated); parameter names restored from the defaults.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            F'The combined stderr from workers follows:\n{stderr}' )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F'\'{cmd_str}\' produced no output.' )
    return result
def UpperCamelCase_( ):
    """Return the numeric pytest-xdist worker id (e.g. 2 for "gw2");
    0 when not running under xdist.

    NOTE(review): the obfuscated original ran `re.sub` on an undefined name
    instead of the environment value.
    """
    worker = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
    worker = re.sub(r'^gw' , '' , worker , 0 , re.M )
    return int(worker )
def UpperCamelCase_( ):
    """Return a torch.distributed init port unique to this pytest-xdist worker.

    NOTE(review): `pytest_xdist_worker_id` is the helper defined just above
    (its name was obfuscated); `port`/`uniq_delta` restored from their reads.
    """
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 242 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowercase ( __magic_name__="" ):
    """Return a unique file path inside a fresh temporary directory.

    `__magic_name__` is the file suffix to append (e.g. ".wav").
    NOTE(review): the obfuscated original referenced an undefined `suffix`
    and the non-existent `uuid.uuida`; both fixed here.
    """
    directory = tempfile.mkdtemp()
    return os.path.join(directory , str(uuid.uuid4() ) + __magic_name__ )


# Backward-compatible alias: the test classes below call `get_new_path(suffix=...)`.
def get_new_path(suffix="" ):
    return lowercase(suffix )
@require_soundfile
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Round-trip checks for AgentAudio (tensor <-> wav file path).

    NOTE(review): local names restored from their reads — the obfuscated
    original assigned everything to `UpperCAmelCase` and then read
    `agent_type`/`path` that were never bound.
    """

    def test_from_tensor( self ):
        tensor = torch.rand(12 , dtype=torch.floataa ) - 0.5
        agent_type = AgentAudio(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1e-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path )
        self.assertTrue(torch.allclose(tensor , torch.tensor(new_tensor ) , atol=1e-4 ) )

    def test_from_string( self ):
        tensor = torch.rand(12 , dtype=torch.floataa ) - 0.5
        path = get_new_path(suffix=".wav" )
        sf.write(path , tensor , 16000 )
        agent_type = AgentAudio(path )
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1e-4 ) )
        self.assertEqual(agent_type.to_string() , path )
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for ``AgentImage`` built from a tensor, a PIL image, or a path.

    NOTE(review): the three test methods share the mangled name ``A_``; only
    the last definition is visible to unittest discovery — restore distinct
    original names upstream.
    """

    def A_ ( self ):
        """From a tensor: raw view is a PIL image and the file persists."""
        tensor = torch.randint(0 , 256 , (64, 64, 3) )
        agent_type = AgentImage(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type._tensor , atol=1e-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )

    def A_ ( self ):
        """From a PIL image: ``to_string()`` points back at the source file."""
        path = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image = Image.open(path )
        agent_type = AgentImage(image )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )

    def A_ ( self ):
        """From a path: the agent copies the file rather than aliasing it."""
        path = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image = Image.open(path )
        agent_type = AgentImage(path )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for ``AgentText``, which must behave like the wrapped string."""

    def A_ ( self ):
        text = "Hey!"
        agent_type = AgentText(text )
        # String view, raw view, and the agent object itself all equal the input.
        self.assertEqual(text , agent_type.to_string() )
        self.assertEqual(text , agent_type.to_raw() )
        self.assertEqual(text , agent_type )
| 679 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger.
# NOTE(review): both module-level assignments below bind the same mangled name
# `__A`, so the logger created here is immediately clobbered by the URL map
# that follows — these presumably had distinct names originally (a logger and
# a pretrained-config archive map); restore upstream.
__A : List[Any] = logging.get_logger(__name__)
# Checkpoint name -> config URL for the published MobileNetV2 variants.
__A : Union[str, Any] = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    """Configuration class holding the hyper-parameters of a MobileNetV2 model.

    NOTE(review): reconstructed from a name-mangled source. The original
    signature used one duplicated parameter name (a SyntaxError); parameter
    names and ``model_type`` were recovered from the attribute assignments
    below. Base class restored to the imported ``PretrainedConfig``.
    """

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs )
        # A non-positive width multiplier would shrink every layer to 0 channels.
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class _SCREAMING_SNAKE_CASE ( OnnxConfig ):
    """ONNX export configuration for MobileNetV2.

    NOTE(review): reconstructed from a name-mangled source in which all three
    properties shared one name; they are restored to the ``OnnxConfig``
    interface (``inputs`` / ``outputs`` / ``atol_for_validation``) and the
    base class to the imported ``OnnxConfig``.
    """

    # Minimum torch version able to export this architecture.
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ):
        # Only the batch axis is dynamic.
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def outputs( self ):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def atol_for_validation( self ):
        # Absolute tolerance used when validating exported-model outputs.
        return 1e-4
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( tf_checkpoint_path , config_path , pytorch_dump_path ):
    """Convert a TF2 "token dropping" BERT checkpoint into a PyTorch BertForMaskedLM.

    Args:
        tf_checkpoint_path: directory of the TensorFlow checkpoint to read.
        config_path: path to the BERT config JSON describing the architecture.
        pytorch_dump_path: directory where the converted model is saved.

    NOTE(review): reconstructed from a name-mangled source whose assignment
    targets were lost; each target was recovered from the ``.shape`` argument
    or TF variable name of the adjacent load — confirm against the original
    transformers conversion script.
    """

    def get_masked_lm_array(name):
        # Masked-LM head weights live under the `masked_lm/` checkpoint scope.
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            # TF dense kernels are (in, out); torch Linear expects (out, in).
            array = array.transpose()
        return torch.from_numpy(array )

    def get_encoder_array(name):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )

    def get_encoder_layer_array(layer_index, name):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )

    def get_encoder_attention_layer_array(layer_index, name, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        # Attention weights are stored split per head; fold back to 2D first.
        array = array.reshape(original_shape )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )

    print(f"Loading model based on config from {config_path}..." )
    config = BertConfig.from_json_file(config_path )
    model = BertForMaskedLM(config )

    # Layers
    for layer_index in range(0 , config.num_hidden_layers ):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index , "_query_dense/kernel" , self_attn.query.weight.data.shape )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index , "_query_dense/bias" , self_attn.query.bias.data.shape )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index , "_key_dense/kernel" , self_attn.key.weight.data.shape )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index , "_key_dense/bias" , self_attn.key.bias.data.shape )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index , "_value_dense/kernel" , self_attn.value.weight.data.shape )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index , "_value_dense/bias" , self_attn.value.bias.data.shape )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index , "_output_dense/kernel" , self_output.dense.weight.data.shape )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index , "_output_dense/bias" , self_output.dense.bias.data.shape )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index , "_attention_layer_norm/gamma" )
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index , "_attention_layer_norm/beta" )

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index , "_intermediate_dense/kernel" )
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index , "_intermediate_dense/bias" )

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index , "_output_dense/kernel" )
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index , "_output_dense/bias" )

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index , "_output_layer_norm/gamma" )
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index , "_output_layer_norm/beta" )

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings" )
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings" )
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma" )
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta" )

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel" )
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias" )

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma" )
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta" )

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table" )

    # Pooling
    model.bert.pooler = BertPooler(config=config )
    model.bert.pooler.dense.weight.data: BertPooler = get_encoder_array("_pooler_layer/kernel" )
    model.bert.pooler.dense.bias.data: BertPooler = get_encoder_array("_pooler_layer/bias" )

    # Export final model
    model.save_pretrained(pytorch_dump_path )

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path )
    print(new_model.eval() )

    print("Model conversion was done sucessfully!" )
if __name__ == "__main__":
    # CLI entry point: parse checkpoint/config/output paths and run the
    # conversion function defined above (named `lowercase` in this module).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    lowercase(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 679 | 0 |
from timeit import timeit
def snake_case__ ( lowerCamelCase_ ):
    """Count the set bits of a non-negative int with Brian Kernighan's algorithm.

    Each ``n &= n - 1`` clears the lowest set bit, so the loop runs once per
    set bit rather than once per bit.

    Raises:
        ValueError: if the input is negative.
    """
    if lowerCamelCase_ < 0:
        raise ValueError('''the value of input must not be negative''' )
    number = lowerCamelCase_
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def snake_case__ ( lowerCamelCase_ ):
    """Count the set bits of a non-negative int by testing ``n % 2`` per bit.

    Shifts the number right one bit at a time, incrementing the count whenever
    the low bit is set.

    Raises:
        ValueError: if the input is negative.
    """
    if lowerCamelCase_ < 0:
        raise ValueError('''the value of input must not be negative''' )
    number = lowerCamelCase_
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def snake_case__ ( ):
    """Benchmark both bit-count implementations with `timeit`.

    NOTE(review): this module's source was name-mangled — the names read
    below (`get_set_bits_count_using_modulo_operator`,
    `get_set_bits_count_using_brian_kernighans_algorithm`, `number`,
    `timing`, and the loop argument `lowerCamelCase_`) are not defined, so
    running this raises NameError until the original identifiers are
    restored (the two implementations above are both named `snake_case__`).
    """
    def do_benchmark(lowerCamelCase_ ) -> None:
        # `A` is assigned the timeit setup string, but the calls below read other names.
        A : Optional[int] = "import __main__ as z"
        print(F'Benchmark when {number = }:' )
        print(F'{get_set_bits_count_using_modulo_operator(lowerCamelCase_ ) = }' )
        A : Union[str, Any] = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=lowerCamelCase_ )
        print(F'timeit() runs in {timing} seconds' )
        print(F'{get_set_bits_count_using_brian_kernighans_algorithm(lowerCamelCase_ ) = }' )
        A : str = timeit(
            '''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=lowerCamelCase_ , )
        print(F'timeit() runs in {timing} seconds' )
    # Representative inputs, including the 0 edge case.
    for number in (25, 37, 58, 0):
        do_benchmark(lowerCamelCase_ )
        print()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `benchmark` is not defined in this module — the benchmark
    # helper above is named `snake_case__` — so this call raises NameError.
    benchmark()
| 542 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
a : str = "src/transformers"
# Matches is_xxx_available()
a : Union[str, Any] = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
a : int = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a : Any = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
a : Dict = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
a : Any = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a : List[str] = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
a : Union[str, Any] = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
a : List[str] = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
a : Any = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
a : Union[str, Any] = re.compile(R"^\s*try:")
# Catches a line with else:
a : Tuple = re.compile(R"^\s*else:")
def lowercase ( __magic_name__ ):
    """Find the backend(s) named in an ``if not is_xxx_available()`` line.

    Returns the sorted backend names joined with ``"_and_"``, or ``None``
    when the line is not a backend guard.
    """
    if _re_test_backend.search(__magic_name__ ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(__magic_name__ )]
    backends.sort()
    return "_and_".join(backends )
def lowercase ( __magic_name__ ):
    """Parse an ``__init__.py`` and return its two halves as dicts.

    Returns a pair ``(import_dict_objects, type_hint_objects)`` mapping each
    backend name (``"none"`` for backend-free objects) to the objects listed
    in ``_import_structure`` and in the ``TYPE_CHECKING`` section, or ``None``
    for a "traditional" init without ``_import_structure``.

    NOTE(review): reconstructed from a name-mangled source; assignment targets
    were recovered from the reads that follow each assignment.
    """
    with open(__magic_name__ , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith("_import_structure = {" ):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]" , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", " )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(" " * 8 + "\"" ):
            objects.append(line[9:-3] )
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(" " * 8 + "\"" ):
                    objects.append(line[9:-3] )
                elif line.startswith(" " * 12 + "\"" ):
                    objects.append(line[13:-3] )
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("else" )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", " ) )
        elif line.startswith(" " * 8 ):
            objects.append(line[8:-2] )
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def lowercase ( import_dict_objects , type_hint_objects ):
    """Compare the two halves of an init and return a list of error strings.

    Args:
        import_dict_objects: backend -> objects found in ``_import_structure``.
        type_hint_objects: backend -> objects found under ``TYPE_CHECKING``.

    An empty list means the two halves agree. (Original mangled signature
    reused one parameter name twice, which is a SyntaxError.)
    """

    def find_duplicates(seq):
        # Objects listed more than once in the same half.
        return [k for k, v in collections.Counter(seq ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )

        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = "base imports" if key == "none" else F"{key} backend"
            errors.append(F"Differences for {name}:" )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F"  {a} in TYPE_HINT but not in _import_structure." )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F"  {a} in _import_structure but not in TYPE_HINT." )
    return errors
def lowercase ( ):
    """Check every ``__init__.py`` under the transformers tree for init drift.

    Raises:
        ValueError: listing each init whose ``_import_structure`` half and
        ``TYPE_CHECKING`` half do not define the same objects.

    NOTE(review): ``parse_init`` / ``analyze_results`` are this module's
    parser and comparator (name-mangled here); ``PATH_TO_TRANSFORMERS`` is
    the module-level source root constant.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , "__init__.py" )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors ) )
    if len(failures ) > 0:
        raise ValueError("\n\n".join(failures ) )
def lowercase ( ):
    """List dotted submodule names directly under the transformers package.

    Walks ``PATH_TO_TRANSFORMERS``, collecting every non-private package
    folder (any depth) and every top-level ``.py`` module except
    ``__init__.py``.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_" ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("*.py" ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , "." )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
            if len(submodule.split("." ) ) == 1:
                submodules.append(submodule )
    return submodules
# Submodules intentionally absent from the main init's `_import_structure`
# (read by the submodule-registration check below as IGNORE_SUBMODULES).
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def lowercase ( ):
    """Verify every transformers submodule is registered in the main init.

    Loads the real ``transformers/__init__.py`` from source and compares its
    ``_import_structure`` keys against the discovered submodules, minus the
    deliberate exceptions in ``IGNORE_SUBMODULES``.

    Raises:
        ValueError: listing any unregistered submodules.
    """
    spec = importlib.util.spec_from_file_location(
        "transformers" , os.path.join(PATH_TO_TRANSFORMERS , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = "\n".join(F"- {module}" for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            F"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
    # NOTE(review): `check_all_inits` and `check_submodules` are not defined in
    # this module — its check functions are all name-mangled to `lowercase`
    # (each definition shadowing the previous one) — so these calls raise
    # NameError until the original function names are restored.
    check_all_inits()
    check_submodules()
| 679 | 0 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def __lowerCAmelCase ( box , width , height ):
    """Scale an absolute (left, top, right, bottom) box to the 0-1000 range.

    Args:
        box: pixel coordinates ``[left, top, right, bottom]``.
        width: image width in pixels.
        height: image height in pixels.

    (Original mangled signature reused one parameter name three times, which
    is a SyntaxError; names restored from the body.)
    """
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
def __lowerCAmelCase ( image , lang , tesseract_config ):
    """Run Tesseract OCR on an image and return ``(words, normalized_boxes)``.

    Args:
        image: image in any format accepted by ``to_pil_image``.
        lang: language hint forwarded to pytesseract (may be ``None``).
        tesseract_config: extra Tesseract CLI flags forwarded as ``config``.

    Boxes are scaled to 0-1000 via the module's ``normalize_box`` helper.
    (Reconstructed from a name-mangled source: duplicate parameter names and
    collapsed assignment targets restored from the reads below.)
    """
    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    ocr_data = pytesseract.image_to_data(pil_image , lang=lang , output_type="dict" , config=tesseract_config )
    words , left , top , width , height = ocr_data["text"], ocr_data["left"], ocr_data["top"], ocr_data["width"], ocr_data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )

    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class _UpperCamelCase (BaseImageProcessor ):
    """Image processor: resize / rescale / normalize plus optional Tesseract OCR.

    NOTE(review): reconstructed from a name-mangled source — every method
    signature reused one parameter name (a SyntaxError) and all four methods
    shared one name while the bodies call ``self.resize`` / ``self.rescale``
    / ``self.normalize``; names restored from those reads. Base class
    restored to the imported ``BaseImageProcessor``.
    """

    model_input_names = ["pixel_values"]

    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_rescale = True , rescale_value = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , apply_ocr = True , ocr_lang = None , tesseract_config = "" , **kwargs , )-> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size )

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ):
        """Resize to the exact (height, width) in `size` via the module-level helper."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = (size["height"], size["width"])
        # `resize` here resolves to the function imported from image_transforms.
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format = None , **kwargs , ):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        """Standardize with per-channel `mean` and `std`."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize = None , size = None , resample=None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , apply_ocr = None , ocr_lang = None , tesseract_config = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        """Full pipeline: optional OCR, then resize/rescale/normalize each image.

        Per-call arguments default to the values configured on the instance.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images )

        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified." )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , "pytesseract" )
            words_batch = []
            boxes_batch = []
            for image in images:
                words , boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )

        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]

        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = BatchFeature(data={"pixel_values": images} , tensor_type=return_tensors )

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 367 |
'''simple docstring'''
import os
def lowercase ( ):
    """Project Euler maximum path sum: read ``triangle.txt`` next to this
    script and return the largest top-to-bottom sum.

    Uses top-down DP in place: each cell accumulates the better of its two
    parents; the answer is the max of the last row.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , "triangle.txt" )

    with open(triangle_path ) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" " ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )

    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            # Parents contribute 0 outside the triangle edges.
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
if __name__ == "__main__":
    # `lowercase` is this module's solution entry point (name-mangled source).
    print(lowercase())
| 679 | 0 |
import math
def lowerCAmelCase__ ( _a ):
    """Sieve of Eratosthenes: return the primes strictly below ``_a``.

    Marks only odd composites (even numbers are skipped entirely and 2 is
    seeded into the result directly).
    """
    n = _a
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3 , int(n**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3 , n , 2 ):
        if is_prime[i]:
            primes.append(i )

    return primes
def lowerCAmelCase__ ( _a : Optional[Any] = 99_99_66_66_33_33 ):
snake_case_ : Optional[Any] = math.floor(math.sqrt(_a ) ) + 1_00
snake_case_ : List[Any] = prime_sieve(_a )
snake_case_ : int = 0
snake_case_ : Dict = 0
snake_case_ : str = primes[prime_index]
while (last_prime**2) <= limit:
snake_case_ : str = primes[prime_index + 1]
snake_case_ : str = last_prime**2
snake_case_ : Tuple = next_prime**2
# Get numbers divisible by lps(current)
snake_case_ : Union[str, Any] = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
snake_case_ : Tuple = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
snake_case_ : Any = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
snake_case_ : List[Any] = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 568 |
"""Project Euler 25: index of the first Fibonacci term containing ``n`` digits."""


def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number with F(2) == 1.

    Returns 0 for ``n == 1`` or non-integer input (preserved legacy behavior).
    Fixes vs previous revision: the guard called ``isinstance(n, n)``, which
    raises TypeError for every ``n != 1`` since the second argument must be a type.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least ``n`` digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Entry point: index of the first Fibonacci term with ``n`` digits.

    All three functions were previously defined under one obfuscated name (each
    overwriting the last) while calling ``fibonacci`` / ``fibonacci_digits_index``
    / ``solution`` — NameError at runtime.
    """
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 679 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class _snake_case:
    """CLI arguments for training a CodeParrot-style causal LM.

    NOTE(review): obfuscation artifacts kept as-is — all six dataclasses in this
    module share the name ``_snake_case`` (each definition overwrites the previous,
    so only the last class is importable), and every field is named ``__snake_case``
    (a dataclass keeps one field per name, so only the last field of each class
    survives). Several defaults reference an undefined name ``lowercase__``
    (presumably ``None``/``False`` upstream — TODO confirm against the original
    codeparrot ``arguments.py``). Restore the original class/field names before use.
    """
    __snake_case: Optional[str] = field(
        default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} )
    __snake_case: Optional[str] = field(
        default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
    __snake_case: Optional[str] = field(
        default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} )
    __snake_case: Optional[str] = field(
        default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
    __snake_case: Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for training.'''} )
    __snake_case: Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} )
    __snake_case: Optional[float] = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} )
    __snake_case: Optional[int] = field(
        default=1_00_00 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
    __snake_case: Optional[float] = field(default=2E-4 , metadata={'''help''': '''Learning rate fo training.'''} )
    __snake_case: Optional[str] = field(default='''cosine''' , metadata={'''help''': '''Learning rate.'''} )
    __snake_case: Optional[int] = field(
        default=7_50 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
    __snake_case: Optional[int] = field(
        default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} )
    __snake_case: Optional[bool] = field(
        default=lowercase__ , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
    __snake_case: Optional[int] = field(default=5_00_00 , metadata={'''help''': '''Maximum number of training steps.'''} )
    __snake_case: Optional[int] = field(
        default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
    __snake_case: Optional[int] = field(default=10_24 , metadata={'''help''': '''Sequence lengths used for training.'''} )
    __snake_case: Optional[int] = field(default=1 , metadata={'''help''': '''Training seed.'''} )
    __snake_case: Optional[int] = field(
        default=10_24 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
    __snake_case: Optional[str] = field(
        default=lowercase__ , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
    __snake_case: Optional[bool] = field(default=lowercase__ , metadata={'''help''': '''If True the data is pretokenized.'''} )


@dataclass
class _snake_case:
    """CLI arguments for perplexity evaluation of a trained model."""
    __snake_case: Optional[str] = field(
        default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
    __snake_case: Optional[str] = field(
        default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
    __snake_case: Optional[int] = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
    __snake_case: Optional[int] = field(
        default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
    __snake_case: Optional[int] = field(default=10_24 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
    __snake_case: Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )


@dataclass
class _snake_case:
    """CLI arguments for HumanEval code-generation benchmarking (sampling + execution)."""
    __snake_case: Optional[str] = field(
        default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
    __snake_case: Optional[int] = field(default=lowercase__ , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
    __snake_case: Optional[int] = field(
        default=lowercase__ , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
    __snake_case: Optional[bool] = field(
        default=lowercase__ , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
    __snake_case: Optional[float] = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
    __snake_case: Optional[int] = field(default=2_56 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
    __snake_case: Optional[int] = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
    __snake_case: Optional[float] = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
    __snake_case: Optional[int] = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
    __snake_case: Optional[int] = field(
        default=2_00 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
    __snake_case: Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
    __snake_case: Optional[str] = field(
        default='''eval_results.json''' , metadata={'''help''': '''Random seed used for evaluation.'''} )
    __snake_case: Optional[str] = field(
        default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
    __snake_case: Optional[int] = field(
        default=-1 , metadata={
            '''help''': (
                '''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
                ''' number corresponds to which GPU device id to run on.'''
            )
        } , )


@dataclass
class _snake_case:
    """CLI arguments for the dataset cleaning / filtering preprocessing step."""
    __snake_case: Optional[int] = field(
        default=lowercase__ , metadata={
            '''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
        } , )
    __snake_case: Optional[str] = field(
        default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
    __snake_case: Optional[str] = field(
        default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save processed processed dataset.'''} )
    __snake_case: Optional[int] = field(
        default=10_00_00 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
    __snake_case: Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
    __snake_case: Optional[float] = field(
        default=10_00 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
    __snake_case: Optional[float] = field(
        default=1_00 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
    __snake_case: Optional[float] = field(
        default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
    __snake_case: Optional[float] = field(
        default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
    __snake_case: Optional[float] = field(
        default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
    __snake_case: Optional[str] = field(
        default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
    __snake_case: Optional[bool] = field(
        default=lowercase__ , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
    __snake_case: Optional[float] = field(
        default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )


@dataclass
class _snake_case:
    """CLI arguments for training a new BPE tokenizer from a dataset."""
    __snake_case: Optional[str] = field(
        default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
    __snake_case: Optional[str] = field(
        default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
    __snake_case: Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
    __snake_case: Optional[int] = field(default=20_00_00 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
    __snake_case: Optional[int] = field(
        default=3_27_68 , metadata={'''help''': '''Number of examples to train the tokenizer on.'''} )
    __snake_case: Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
    __snake_case: Optional[bool] = field(default=lowercase__ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )


@dataclass
class _snake_case:
    """CLI arguments for pretokenizing a dataset and pushing the result to the Hub."""
    __snake_case: Optional[str] = field(
        default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
    __snake_case: Optional[str] = field(
        default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
    __snake_case: Optional[str] = field(
        default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
    __snake_case: Optional[int] = field(default=lowercase__ , metadata={'''help''': '''Number of workers used for code evaluation.'''} )


@dataclass
class _snake_case:
    """CLI arguments for initializing a fresh model from a base configuration."""
    __snake_case: Optional[str] = field(
        default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
    __snake_case: Optional[str] = field(
        default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
    __snake_case: Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
    __snake_case: Optional[bool] = field(default=lowercase__ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
| 531 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Number of transformer blocks for each published RWKV checkpoint size.
# Previously both dicts were bound to a throwaway name `a` (the second
# clobbered the first) while the conversion code below reads these names.
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

# Hidden (embedding) size for each checkpoint size. The misspelled "HIDEN"
# name is kept deliberately: the conversion function refers to it as such.
HIDEN_SIZE_MAPPING = {
    "169M": 7_68,
    "430M": 10_24,
    "1B5": 20_48,
    "3B": 25_60,
    "7B": 40_96,
    "14B": 51_20,
}
def convert_state_dict(state_dict):
    """Rename keys of an original RWKV state dict to the Transformers scheme.

    Mutates and returns ``state_dict``. Fixes vs previous revision: the function
    was defined under an obfuscated name while the converter calls
    ``convert_state_dict``, and every renamed key was assigned to a throwaway
    local so all renames were silently lost.
    """
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(R"blocks\.(\d+)\.att", R"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(R"blocks\.(\d+)\.ffn", R"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        # Every parameter except the LM head lives under the `rwkv` submodule.
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Convert an original RWKV checkpoint from the Hub into Transformers format.

    Args:
        repo_id: Hub repo to pull the raw checkpoint from.
        checkpoint_file: name of the checkpoint file inside that repo.
        output_dir: where the converted tokenizer/config/weights are written.
        size: model size key (inferred from ``checkpoint_file`` if omitted).
        tokenizer_file: optional tokenizer JSON; defaults to the GPT-NeoX-20B tokenizer.
        push_to_hub: if True, push the converted model under ``model_name``.
        model_name: Hub name to push to (required when ``push_to_hub``).

    Raises:
        ValueError: if the size cannot be inferred / is unknown, or if
            ``push_to_hub`` is set without ``model_name``.

    Fixes vs previous revision: the function was defined under an obfuscated
    name while ``__main__`` calls ``convert_rmkv_checkpoint_to_hf_format``, and
    every local/argument had collapsed to the same throwaway identifiers.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 5_0277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(F"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())

    # Drop references before reloading shards one by one to keep peak memory down.
    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    # CLI entry point for the RWKV -> Transformers checkpoint conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )
    # Previously the parsed namespace was bound to `a` while the call below
    # read `args.*` — NameError at runtime.
    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( lowercase__ ):
    """Deprecated alias for ``MobileViTImageProcessor``; emits a FutureWarning on use."""

    def __init__(self, *args, **kwargs):
        """Warn about the deprecation, then forward everything to the image processor.

        Fixes vs previous revision: both the var-positional and var-keyword
        parameters were named ``snake_case_`` (duplicate argument -> SyntaxError),
        and ``warnings.warn`` was passed the argument tuple instead of a warning
        category.
        """
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""Bitwise AND of two non-negative integers, rendered as a '0b'-prefixed binary string."""


def lowercase(a: int, b: int) -> str:
    """Return ``a AND b`` as a binary string zero-padded to the longer operand.

    Raises:
        ValueError: if either input is negative.

    Fixes vs previous revision: both parameters were named ``__magic_name__``
    (duplicate argument -> SyntaxError) while the body read undefined ``a``/``b``.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    # A result bit is 1 only where both zero-padded operands have a '1'.
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 679 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCamelCase_ ( lowercase__ ):
    """Repository hygiene checks over every dataset script under ``./datasets``.

    NOTE(review): obfuscation artifacts — all four methods below are named
    ``__a`` so only the last definition survives on the class; the bodies call
    ``self._no_encoding_on_file_open`` / ``self._no_print_statements`` and read
    locals (``regexp``, ``match``, ``matches``, ``dataset_paths``,
    ``dataset_files``) that the obfuscated assignments no longer bind. The base
    class ``lowercase__`` is presumably ``unittest.TestCase`` — TODO confirm
    against the upstream file before relying on this class.
    """

    # Returns a regex match if the file calls open(...) without an explicit
    # encoding (binary / mode-bearing calls are excluded by the lookahead).
    def __a ( self : List[str] , lowerCamelCase : Optional[int] ):
        with open(lowerCamelCase , encoding='utf-8' ) as input_file:
            lowerCamelCase_ : Dict = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
            lowerCamelCase_ : Tuple = input_file.read()
            lowerCamelCase_ : List[Any] = regexp.search(lowerCamelCase )
        return match

    # Returns the first uncommented, unquoted print(...) call in the file,
    # or None when every print occurrence is inside a comment or string.
    def __a ( self : int , lowerCamelCase : Optional[Any] ):
        with open(lowerCamelCase , encoding='utf-8' ) as input_file:
            lowerCamelCase_ : List[str] = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
            lowerCamelCase_ : List[Any] = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            lowerCamelCase_ : str = regexp.finditer(lowerCamelCase )
        lowerCamelCase_ : Union[str, Any] = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None

    # Fails if any dataset script opens a file without declaring utf-8 encoding.
    def __a ( self : List[str] ):
        lowerCamelCase_ : Dict = Path('./datasets' )
        lowerCamelCase_ : Optional[int] = list(dataset_paths.absolute().glob('**/*.py' ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(lowerCamelCase ) ):
                raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )

    # Fails if any dataset script contains a live print(...) statement.
    def __a ( self : List[Any] ):
        lowerCamelCase_ : Union[str, Any] = Path('./datasets' )
        lowerCamelCase_ : Any = list(dataset_paths.absolute().glob('**/*.py' ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(lowerCamelCase ) ):
                raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 364 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework available in the environment; the tokenizer tests
# below pass this as `return_tensors=...` and branch on it. Previously each
# branch bound a throwaway name `a` while the test class reads `FRAMEWORK`.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
    """Test suite for ``PerceiverTokenizer`` (a byte-level tokenizer).

    NOTE(review): obfuscation artifacts throughout — every method is named
    ``A_``, so at class creation each definition clobbers the previous one and
    only the last survives; the two class attributes are both named
    ``SCREAMING_SNAKE_CASE__`` (the second overwrites the first — upstream they
    are presumably ``tokenizer_class`` and ``test_rust_tokenizer``, TODO confirm);
    many bodies read names (``tokenizer``, ``tokenizers``, ``toks``, ``encoded``,
    ``targets``, ``batch``, ``self.tokenizer_class``, ``self.perceiver_tokenizer``)
    that the obfuscated local assignments no longer bind. Restore the original
    identifiers from the upstream test file before relying on this class.
    """

    SCREAMING_SNAKE_CASE__ : int = PerceiverTokenizer
    SCREAMING_SNAKE_CASE__ : List[str] = False

    # Fixture: create a fresh tokenizer and persist it into the temp dir.
    def A_ ( self ):
        """Save a freshly constructed PerceiverTokenizer under ``self.tmpdirname``."""
        super().setUp()
        UpperCAmelCase : List[str] = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    # Cached handle on the published checkpoint's tokenizer.
    @cached_property
    def A_ ( self ):
        """Return the pretrained deepmind/language-perceiver tokenizer."""
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )

    def A_ ( self , **snake_case ):
        """Reload the tokenizer saved in setUp, forwarding extra kwargs."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )

    # Builds a (text, ids) pair that round-trips cleanly through decode/encode.
    # NOTE(review): the four non-self parameters all share the name ``snake_case``
    # (duplicate arguments are a SyntaxError); upstream they are presumably
    # (tokenizer, with_prefix_space=False, max_length=20, min_length=5).
    def A_ ( self , snake_case , snake_case=False , snake_case=2_0 , snake_case=5 ):
        """Return a clean (output_txt, output_ids) sample for round-trip tests."""
        UpperCAmelCase : Optional[Any] = []
        for i in range(len(snake_case ) ):
            try:
                UpperCAmelCase : int = tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        UpperCAmelCase : Optional[int] = list(filter(lambda snake_case : re.match(r"^[ a-zA-Z]+$" , t[1] ) , snake_case ) )
        UpperCAmelCase : Any = list(filter(lambda snake_case : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=snake_case ) , snake_case ) )
        if max_length is not None and len(snake_case ) > max_length:
            UpperCAmelCase : Optional[Any] = toks[:max_length]
        if min_length is not None and len(snake_case ) < min_length and len(snake_case ) > 0:
            while len(snake_case ) < min_length:
                UpperCAmelCase : Any = toks + toks
        # toks_str = [t[1] for t in toks]
        UpperCAmelCase : Dict = [t[0] for t in toks]
        # Ensure consistency
        UpperCAmelCase : Any = tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case )
        if " " not in output_txt and len(snake_case ) > 1:
            UpperCAmelCase : Dict = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case )
            )
        if with_prefix_space:
            UpperCAmelCase : Union[str, Any] = " " + output_txt
        UpperCAmelCase : Dict = tokenizer.encode(snake_case , add_special_tokens=snake_case )
        return output_txt, output_ids

    # Multibyte characters (e.g. the euro sign) must encode/decode losslessly.
    def A_ ( self ):
        """Round-trip unicode text through __call__, decode and encode."""
        UpperCAmelCase : Union[str, Any] = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = "Unicode €."
        UpperCAmelCase : int = tokenizer(snake_case )
        UpperCAmelCase : Tuple = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Optional[Any] = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]Unicode €.[SEP]" )
        UpperCAmelCase : Tuple = tokenizer("e è é ê ë" )
        UpperCAmelCase : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Dict = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]e è é ê ë[SEP]" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )

    # Batch encoding: padded ids and attention mask shapes must match.
    def A_ ( self ):
        """Check tokenization of a padded batch against expected byte ids."""
        UpperCAmelCase : int = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        UpperCAmelCase : List[str] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
        # fmt: on
        UpperCAmelCase : Dict = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        self.assertIsInstance(snake_case , snake_case )
        if FRAMEWORK != "jax":
            UpperCAmelCase : List[Any] = list(batch.input_ids.numpy()[0] )
        else:
            UpperCAmelCase : str = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(snake_case , snake_case )
        self.assertEqual((2, 3_8) , batch.input_ids.shape )
        self.assertEqual((2, 3_8) , batch.attention_mask.shape )

    # Encoder-only model: no decoder inputs should be produced.
    def A_ ( self ):
        """Ensure the batch contains input_ids/attention_mask but no decoder keys."""
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        UpperCAmelCase : List[Any] = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , snake_case )
        self.assertIn("attention_mask" , snake_case )
        self.assertNotIn("decoder_input_ids" , snake_case )
        self.assertNotIn("decoder_attention_mask" , snake_case )

    # Target texts padded/truncated to max_length must come back that length.
    def A_ ( self ):
        """Tokenize targets with max_length padding and check the sequence length."""
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : int = [
            "Summary of the text.",
            "Another summary.",
        ]
        UpperCAmelCase : List[Any] = tokenizer(
            text_target=snake_case , max_length=3_2 , padding="max_length" , truncation=snake_case , return_tensors=snake_case )
        self.assertEqual(3_2 , targets["input_ids"].shape[1] )

    # save_pretrained / from_pretrained round-trip, with and without added tokens.
    def A_ ( self ):
        """Persist a tokenizer (plus added/special tokens) and reload it unchanged."""
        UpperCAmelCase : Any = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length , 4_2 )
        # Now let's start the test
        UpperCAmelCase : Tuple = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : Dict = tempfile.mkdtemp()
                UpperCAmelCase : Any = " He is very happy, UNwant\u00E9d,running"
                UpperCAmelCase : int = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )
                UpperCAmelCase : List[str] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                shutil.rmtree(snake_case )
        UpperCAmelCase : Dict = self.get_tokenizers(model_max_length=4_2 )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : str = tempfile.mkdtemp()
                UpperCAmelCase : int = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"] )
                UpperCAmelCase : int = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token" )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                UpperCAmelCase : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )
                UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 4_2 )
                UpperCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(snake_case , model_max_length=4_3 )
                self.assertEqual(tokenizer.model_max_length , 4_3 )
                shutil.rmtree(snake_case )

    # additional_special_tokens must be honored both from the saved config
    # files and when overridden via from_pretrained kwargs.
    def A_ ( self ):
        """Edit tokenizer config JSONs on disk and verify special-token loading."""
        UpperCAmelCase : Dict = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(snake_case )
                with open(os.path.join(snake_case , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Union[str, Any] = json.load(snake_case )
                with open(os.path.join(snake_case , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Any = json.load(snake_case )
                UpperCAmelCase : str = [f"<extra_id_{i}>" for i in range(1_2_5 )]
                UpperCAmelCase : List[Any] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                UpperCAmelCase : List[str] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(snake_case , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )
                with open(os.path.join(snake_case , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                UpperCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(
                    snake_case , )
                self.assertIn(
                    "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                UpperCAmelCase : Optional[int] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case )]
                UpperCAmelCase : Optional[int] = tokenizer_class.from_pretrained(
                    snake_case , additional_special_tokens=snake_case , )
                self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )

    # Decoding an id that is not valid UTF-8 yields the replacement character.
    def A_ ( self ):
        """Decode byte id 178 to the unicode replacement character."""
        UpperCAmelCase : int = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_7_8] ) , "�" )

    # The next four empty methods presumably override mixin tests that do not
    # apply to a byte-level tokenizer — TODO confirm the original method names.
    def A_ ( self ):
        """Intentionally skipped (not applicable to this tokenizer)."""
        pass

    def A_ ( self ):
        """Intentionally skipped (not applicable to this tokenizer)."""
        pass

    def A_ ( self ):
        """Intentionally skipped (not applicable to this tokenizer)."""
        pass

    def A_ ( self ):
        """Intentionally skipped (not applicable to this tokenizer)."""
        pass

    # convert_tokens_to_string must accept special tokens and return a str.
    def A_ ( self ):
        """Join a token list (including [CLS]/[SEP]) back into a plain string."""
        UpperCAmelCase : Dict = self.get_tokenizers(fast=snake_case , do_lower_case=snake_case )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                UpperCAmelCase : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                UpperCAmelCase : int = tokenizer.convert_tokens_to_string(snake_case )
                self.assertIsInstance(snake_case , snake_case )
| 679 | 0 |
# Doc-builder configuration for the Korean documentation: the notebook install
# cell and the doc-formatting placeholder map. Previously all three constants
# were bound to `__a` while the second line read `INSTALL_CONTENT` — NameError.
INSTALL_CONTENT = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

# First cell prepended to every generated notebook.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Placeholders the doc formatter must leave untouched.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 606 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
a : Tuple = logging.get_logger(__name__)
# Map of checkpoint name -> hosted config URL for EfficientFormer.
a : str = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class UpperCamelCase__ ( PretrainedConfig ):
    """Configuration class for EfficientFormer models.

    Defaults correspond to the published
    ``snap-research/efficientformer-l1-300`` checkpoint.

    Fixes (grounded in the original block):
    - the base class was the undefined placeholder ``lowercase__`` although
      ``PretrainedConfig`` is imported at the top of the file,
    - every ``__init__`` parameter was named ``snake_case`` (duplicate
      argument names — a SyntaxError); names are restored from the attribute
      assignments in the body,
    - attribute values were bound to throwaway locals instead of ``self``,
    - ``num_metaad_blocks`` is renamed ``num_meta3d_blocks`` (upstream name).
    """

    # PretrainedConfig keys configs by this identifier; the original bound the
    # string to a junk attribute name.
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 679 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
# Generic key (T) and value (U) type parameters for the cache classes below.
# Fix: both TypeVars were bound to the same throwaway name while the classes
# below reference `T` and `U`.
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    """Node of a doubly linked list holding one key/value pair."""

    def __init__(self, key: T | None, val: U | None) -> None:
        # Fix: the previous version bound key/val/next/prev to throwaway
        # locals, so __repr__ and every consumer crashed on missing attributes.
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Doubly linked list with sentinel head/rear nodes; keeps LRU order."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        # Fix: wire the two sentinels together; the old code assigned the
        # tuple to a throwaway local, leaving head and rear disconnected.
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Insert *node* just before the rear sentinel (most-recent slot)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Unlink *node* and return it, or None if it was not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """Least-recently-used cache backed by a dict plus a doubly linked list.

    Also usable as a memoizing decorator for single-argument functions via
    ``LRUCache.decorator(size)``.
    """

    # Maps each decorated function to its dedicated cache instance.
    # Fix: this class attribute previously had a junk name, so the decorator
    # machinery below raised AttributeError on every lookup.
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the cached value for *key* (refreshing its recency), else None."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Store *value* under *key*, evicting the least-recent entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0;
                # explain to the type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        """Return a decorator memoizing a one-argument function in an LRUCache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 259 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds small ResNet configs and inputs for the TF model tests below.

    Fixes (grounded in the original block):
    - renamed from a placeholder: the test case below instantiates
      ``TFResNetModelTester(self)``,
    - all ``__init__`` parameters were named ``snake_case`` (duplicate names —
      a SyntaxError); names restored from the attribute assignments,
    - attribute values were bound to throwaway locals instead of ``self``,
    - method names restored from their call sites
      (``prepare_config_and_inputs``, ``create_and_check_model``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in `depths`; read by the hidden-states test.
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): `lowercase__` is not defined anywhere in this file as
    # shown — presumably these bases were the common TFModelTesterMixin /
    # PipelineTesterMixin imported above; confirm upstream.
    # NOTE(review): every method below is named `A_`, so each later `def A_`
    # shadows the previous one and unittest discovers no `test_*` methods;
    # presumably these were distinct test methods originally.
    # NOTE(review): several assignments bind results to the placeholder
    # `UpperCAmelCase` and then reference other names (`config`,
    # `inputs_dict`, `model`, ...) — the original local names appear lost.

    # Model classes / pipeline mapping exercised by the common tests.
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    SCREAMING_SNAKE_CASE__ : Optional[int] = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # NOTE(review): five flags all bound to the same duplicate attribute name;
    # only the last assignment survives.
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : int = False
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : Optional[Any] = False
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = False

    def A_ ( self ):
        '''simple docstring'''
        # Build the shared model tester and a ConfigTester for ResNet configs.
        UpperCAmelCase : Dict = TFResNetModelTester(self )
        UpperCAmelCase : List[Any] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case )

    def A_ ( self ):
        '''simple docstring'''
        # Run the standard configuration round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def A_ ( self ):
        '''simple docstring'''
        # Intentionally empty: no extra common-property checks for ResNet.
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds" )
    def A_ ( self ):
        '''simple docstring'''
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings" )
    def A_ ( self ):
        '''simple docstring'''
        pass

    def A_ ( self ):
        '''simple docstring'''
        # Verify the first forward-signature argument is `pixel_values`.
        # NOTE(review): the tuple unpack binds both results to the same
        # placeholder name, so `config` is discarded here.
        UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase : Dict = model_class(snake_case )
            UpperCAmelCase : Optional[int] = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase : List[str] = [*signature.parameters.keys()]
            UpperCAmelCase : Tuple = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , snake_case )

    def A_ ( self ):
        '''simple docstring'''
        # Base-model forward pass and output-shape check.
        UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )

    def A_ ( self ):
        '''simple docstring'''
        # Hidden-states shape check for both residual layer types.
        def check_hidden_states_output(snake_case , snake_case , snake_case ):
            UpperCAmelCase : Optional[Any] = model_class(snake_case )
            UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
            UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCAmelCase : List[str] = self.model_tester.num_stages
            self.assertEqual(len(snake_case ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase : Optional[int] = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                UpperCAmelCase : str = layer_type
                UpperCAmelCase : Optional[Any] = True
                check_hidden_states_output(snake_case , snake_case , snake_case )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                UpperCAmelCase : str = True
                check_hidden_states_output(snake_case , snake_case , snake_case )

    def A_ ( self ):
        '''simple docstring'''
        # Image-classification head forward pass and logits-shape check.
        UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case )

    @slow
    def A_ ( self ):
        '''simple docstring'''
        # Smoke-test loading the published checkpoint from the hub.
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase : Any = TFResNetModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )
def lowercase ( ):
    """Load the COCO fixture image used by the integration test below."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_tf
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): both methods below are named `A_`, so the second
    # definition shadows the first; additionally the test body reads
    # `self.default_image_processor`, which no attribute here defines —
    # presumably the cached property below was originally named
    # `default_image_processor`; confirm upstream.

    @cached_property
    def A_ ( self ):
        '''simple docstring'''
        # Image processor for the published checkpoint (None without vision deps).
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def A_ ( self ):
        '''simple docstring'''
        # End-to-end inference check against hard-coded reference logits.
        UpperCAmelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        UpperCAmelCase : Union[str, Any] = self.default_image_processor
        UpperCAmelCase : Tuple = prepare_img()
        UpperCAmelCase : str = image_processor(images=snake_case , return_tensors="tf" )
        # forward pass
        UpperCAmelCase : Any = model(**snake_case )
        # verify the logits
        UpperCAmelCase : Any = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , snake_case )
        UpperCAmelCase : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case , atol=1e-4 ) )
| 679 | 0 |
from __future__ import annotations
def a_ ( voltage: float , current: float , resistance: float ) -> dict[str, float]:
    """Apply Ohm's law.

    Exactly one of (voltage, current, resistance) must be 0; the missing
    quantity is computed from the other two and returned as a one-entry dict
    keyed by its name.

    Fix: the original signature declared the same placeholder name for all
    three parameters (duplicate argument names — a SyntaxError); the body
    already referenced ``voltage``/``current``/``resistance``.

    Raises:
        ValueError: if not exactly one argument is 0, or if resistance < 0.
    """
    if (voltage, current, resistance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if resistance < 0:
        raise ValueError('Resistance cannot be negative' )
    if voltage == 0:
        return {"voltage": float(current * resistance )}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError('Exactly one argument must be 0' )
# Run the module doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 615 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    """Builds tiny MPNet configs and inputs for the model tests below.

    Fixes (grounded in the original block):
    - renamed from a placeholder: the test case below instantiates
      ``MPNetModelTester(self)``,
    - all ``__init__`` parameters were named ``snake_case`` (duplicate names —
      a SyntaxError); names restored from the attribute assignments,
    - attribute values were bound to throwaway locals instead of ``self``,
    - method names restored from their call sites
      (``prepare_config_and_inputs``, ``create_and_check_mpnet_model``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        """Config of the published base checkpoint (for reference tests)."""
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, and the three label tensors)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand each example across the choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): `lowercase__` is undefined in this file as shown —
    # presumably these bases were the ModelTesterMixin / PipelineTesterMixin
    # imported above; confirm upstream.
    # NOTE(review): all methods below are named `A_`, so each later `def A_`
    # shadows the previous one and unittest discovers no `test_*` methods.

    # Model classes / pipeline mapping exercised by the common tests.
    SCREAMING_SNAKE_CASE__ : Optional[int] = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE__ : Any = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): two flags bound to the same duplicate attribute name;
    # only the second assignment survives.
    SCREAMING_SNAKE_CASE__ : int = False
    SCREAMING_SNAKE_CASE__ : str = True

    def A_ ( self ):
        '''simple docstring'''
        # Build the shared model tester and a ConfigTester for MPNet configs.
        UpperCAmelCase : Union[str, Any] = MPNetModelTester(self )
        UpperCAmelCase : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=3_7 )

    def A_ ( self ):
        '''simple docstring'''
        # Standard configuration round-trip checks.
        self.config_tester.run_common_tests()

    def A_ ( self ):
        '''simple docstring'''
        # Base-model forward pass and output-shape check.
        UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*snake_case )

    def A_ ( self ):
        '''simple docstring'''
        # Sequence-classification head check.
        UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*snake_case )

    def A_ ( self ):
        '''simple docstring'''
        # Multiple-choice head check.
        UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*snake_case )

    def A_ ( self ):
        '''simple docstring'''
        # Token-classification head check.
        UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*snake_case )

    def A_ ( self ):
        '''simple docstring'''
        # Question-answering head check.
        UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*snake_case )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def A_ ( self ):
        '''simple docstring'''
        # End-to-end inference against hard-coded reference hidden states.
        # NOTE(review): results are bound to the placeholder `UpperCAmelCase`
        # while later lines read `output`; the original local names appear
        # lost — confirm against the upstream test module.
        UpperCAmelCase : Any = MPNetModel.from_pretrained("microsoft/mpnet-base" )
        UpperCAmelCase : Optional[int] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        UpperCAmelCase : Optional[Any] = model(snake_case )[0]
        UpperCAmelCase : Optional[int] = torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape , snake_case )
        UpperCAmelCase : Optional[Any] = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
| 679 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
# Map of checkpoint name -> hosted config URL for Speech2Text.
_lowerCAmelCase : Optional[Any] = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__ ( PretrainedConfig ):
    """Configuration class for Speech2Text encoder-decoder models.

    Defaults correspond to the ``facebook/s2t-small-librispeech-asr``
    checkpoint.

    Fixes (grounded in the original block):
    - the base class was the undefined placeholder ``lowercase__`` although
      ``PretrainedConfig`` is imported at the top of the file,
    - the three class attributes were all named ``SCREAMING_SNAKE_CASE``
      (later ones overwrote earlier ones); restored to the names
      ``PretrainedConfig`` consumes,
    - every ``__init__`` parameter was named ``__snake_case`` (duplicate
      names — a SyntaxError); names restored from the attribute assignments,
    - attribute values were bound to the throwaway local ``__a`` instead of
      ``self``.
    """

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
                f'but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '
                f'`config.num_conv_layers = {self.num_conv_layers}`.')

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 242 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
# Module-level logger for this conversion script.
a : Optional[Any] = logging.get_logger(__name__)
# Map tokenizer name -> its fast tokenizer class, for every known converter.
# NOTE(review): both values bind the placeholder `a`, yet the function below
# references `TOKENIZER_CLASSES` — presumably these were `logger` and
# `TOKENIZER_CLASSES` originally; confirm upstream.
a : List[str] = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints to their fast (tokenizers-backed) form.

    For each selected tokenizer class and checkpoint, loads the tokenizer,
    saves it in fast format under `dump_path` (in a sub-directory for
    organization-scoped checkpoints), and removes every written file except
    `tokenizer.json`.

    Fixes (grounded in the original block):
    - renamed from the placeholder ``lowercase``: the ``__main__`` block below
      calls ``convert_slow_checkpoint_to_fast(...)``,
    - all four parameters were named ``__magic_name__`` (duplicate names — a
      SyntaxError); names restored from the body's references and the
      ``__main__`` call site,
    - junk placeholder arguments restored (``getattr(transformers, ...)``,
      ``force_download=force_download``, ``legacy_format=False``,
      ``os.remove(file_name)``).
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast" )}
    logger.info(f"Loading tokenizer classes: {tokenizer_names}" )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            # No checkpoint given: convert every canonical checkpoint.
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(f"=> File names {file_names}" )
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name )
                    logger.info(f"=> removing {file_name}" )
if __name__ == "__main__":
    # Command-line entry point for the slow->fast tokenizer conversion.
    # NOTE(review): the parser and parsed args are bound to the placeholder
    # `a`, yet `parser.add_argument(...)` and `args.*` are referenced below —
    # presumably the original names were `parser` and `args`; confirm.
    a : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    a : Any = parser.parse_args()
    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 679 | 0 |
import os
def solution():
    """Return the maximum top-to-bottom path sum of the triangle in
    ``triangle.txt`` (Project Euler style).

    Fixes (grounded in the original block):
    - renamed from the placeholder ``__a``: the ``__main__`` guard below
      calls ``solution()``,
    - the undefined placeholder ``A__`` restored to ``__file__``, the
      triangle file path, the parsed number, row list, and DP operands,
    - stray table residue removed from the final line.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as in_file:
        triangle = in_file.readlines()

    # Parse each line into a row of ints.
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # Dynamic programming: each cell accumulates the best path sum ending there.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCamelCase__(PipelineTool):
    """Visual-question-answering tool backed by the ViLT VQA checkpoint.

    Takes an image plus an English question and returns the answer string.
    Fixes over the previous revision: the base class name `lowercase__` was
    undefined (the file imports `PipelineTool` for this purpose); all class
    attributes shared one name so only the last survived; the three hook
    methods were all named `A_` and shadowed each other (PipelineTool expects
    `encode`/`forward`/`decode`); `__init__` repeated a parameter name, which
    is a SyntaxError; and `config.idalabel` should be `config.id2label`.
    """

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # PIL is needed to handle the image input.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image.Image", question: str):
        """Turn the raw (image, question) pair into model-ready tensors."""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        """Run the VQA model; inference only, so no gradients are tracked."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring logit back to its answer label."""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 679 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowercase : Optional[int] = logging.get_logger(__name__)  # module-level logger; NOTE(review): the name `lowercase` looks obfuscated and nothing in view references it
def rename_key(key):
    """Collapse ``name.<digits>`` segments of a PyTorch parameter key into
    ``name_<digits>`` (Flax layer naming), e.g. ``"layers.0.weight"`` ->
    ``"layers_0.weight"``.

    Fixes over the previous revision: the function was named ``snake_case__``
    (colliding with two later definitions in this module, while the converter
    below calls ``rename_key``) and every local was bound to the single name
    ``A``, leaving the pattern list unusable.
    """
    pattern = r"\w+[.]\d+"
    for pat in re.findall(pattern, key):
        # "layers.0" -> "layers_0"
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key (tuple form) to its Flax equivalent and
    reshape the tensor when the layouts differ.

    Args:
        pt_tuple_key: the PyTorch key split into a tuple of path components.
        pt_tensor: the parameter tensor (anything exposing ``.ndim``,
            ``.transpose`` and ``.T``).
        random_flax_state_dict: flattened Flax params used to decide which
            rename applies.

    Returns:
        ``(flax_tuple_key, tensor)``.

    Fixes over the previous revision: every assignment was bound to the single
    name ``A`` while the ``return`` statements read ``renamed_pt_tuple_key``,
    which was never defined (NameError on every call).
    """
    # layer norm: weight/gamma -> scale when the Flax side has a scale param
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding: weight -> embedding (no reshape)
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: OIHW -> HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose the weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # nothing matched: keep the key as-is
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a (nested) Flax params dict for *flax_model*.

    Args:
        pt_state_dict: mapping of PyTorch parameter names to tensors.
        flax_model: Flax model exposing ``init_weights(rng)``; used only to get
            a reference parameter tree for shape/name matching.
        init_key: PRNG seed for the reference initialization.

    Returns:
        The converted parameters, unflattened into Flax's nested-dict layout.

    Raises:
        ValueError: when a converted tensor's shape disagrees with the
            reference Flax parameter of the same name.

    Fixes over the previous revision: the tuple returned by
    ``rename_key_and_reshape_tensor`` was bound to one name instead of being
    unpacked, and ``flax_key``/``flax_tensor``/``flax_state_dict`` were read
    without ever being defined.
    """
    # Step 1: move everything to numpy so shapes/transposes behave uniformly
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.')
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 542 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a : Optional[int] = logging.get_logger(__name__)  # module-level logger; NOTE(review): the name `a` looks obfuscated and nothing in view references it
def rename_key(key):
    """Collapse ``name.<digits>`` segments of a PyTorch parameter key into
    ``name_<digits>`` (Flax layer naming), e.g. ``"layers.0.weight"`` ->
    ``"layers_0.weight"``.

    Fixes over the previous revision: the function was named ``lowercase``
    (colliding with two later definitions, while the converter below calls
    ``rename_key``) and every local was bound to the single name
    ``UpperCAmelCase``.
    """
    pattern = r"\w+[.]\d+"
    for pat in re.findall(pattern, key):
        # "layers.0" -> "layers_0"
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key (tuple form) to its Flax equivalent and
    reshape the tensor when the layouts differ.

    Returns ``(flax_tuple_key, tensor)``.

    Fixes over the previous revision: every assignment was bound to the single
    name ``UpperCAmelCase`` while the ``return`` statements read
    ``renamed_pt_tuple_key``, which was never defined (NameError on every call).
    """
    # layer norm: weight/gamma -> scale when the Flax side has a scale param
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding: weight -> embedding (no reshape)
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: OIHW -> HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose the weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # nothing matched: keep the key as-is
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a (nested) Flax params dict for *flax_model*.

    Raises ``ValueError`` when a converted tensor's shape disagrees with the
    reference Flax parameter of the same name.

    Fixes over the previous revision: the tuple returned by
    ``rename_key_and_reshape_tensor`` was unpacked into the same name twice,
    and ``flax_key``/``flax_tensor``/``flax_state_dict`` were read without
    ever being defined.
    """
    # Step 1: move everything to numpy so shapes/transposes behave uniformly
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.")
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 679 | 0 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# BUG FIX: all four module constants were bound to the single name `lowerCamelCase`,
# so only the last survived, while the tokenizer class below references `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` — all of which were undefined.
logger = logging.get_logger(__name__)

# Expected vocabulary file name inside a checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

# Hub locations of the canonical vocab files per checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

# Maximum input length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class _UpperCamelCase(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR, built from a ``vocab.json`` mapping
    of characters to ids.

    Fixes over the previous revision: the base class name ``lowercase__`` was
    undefined (the file imports ``PreTrainedTokenizer`` for this purpose);
    method signatures repeated the parameter name ``__UpperCamelCase``, which
    is a SyntaxError; all methods shared the name ``__UpperCAmelCase`` and
    shadowed each other, so none of the tokenizer hooks
    (``_tokenize``/``_convert_token_to_id``/…) was actually reachable; and the
    three class attributes all shared the name ``snake_case_``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            # char -> id
            self.vocab = json.load(vocab_handle)
        # id -> char, for decoding
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary (added tokens excluded)."""
        return len(self.vocab)

    def get_vocab(self):
        """Return the full vocabulary including added tokens."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split the input string into single-character tokens."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Map a character to its id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its character (None if unknown)."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocabulary to ``<save_directory>/[<prefix>-]vocab.json``."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            # NOTE(review): sort_keys/ensure_ascii values were garbled upstream;
            # True/False matches the canonical tokenizer implementation.
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 367 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__(SchedulerCommonTest):
    """Unit tests for ``EulerDiscreteScheduler``: config sweeps plus full
    denoising loops with fixed seeds and known reference statistics.

    Fixes over the previous revision: the base class name ``lowercase__`` was
    undefined (the file imports ``SchedulerCommonTest`` for this purpose); all
    methods were named ``A_`` and shadowed each other, so only one survived —
    and ``get_scheduler_config``, which the loop tests call by name, did not
    exist; method bodies also read the undefined name ``snake_case`` and bound
    every local to the single name ``UpperCAmelCase``.
    """

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Deterministic full loop with default (epsilon) prediction."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Deterministic full loop with v-prediction parameterization."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        """Same as the no-noise loop but with timesteps placed on the device."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop on device with the Karras sigma schedule enabled."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 679 | 0 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class UpperCAmelCase_ ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
A : Tuple = WavaVecaPhonemeCTCTokenizer
A : Tuple = False
    def _lowerCAmelCase ( self ) -> Any:
        # Fixture setup: writes a phoneme vocab.json into a temp dir for the tests.
        # NOTE(review): every method in this class is named `_lowerCAmelCase` (obfuscated),
        # so later definitions shadow earlier ones; upstream these carry distinct
        # setUp/get_*/test_* names — confirm against the canonical test file.
        # NOTE(review): `_SCREAMING_SNAKE_CASE` below is not defined in this scope; the
        # original bound the phoneme list / vocab dict to locals these lines still expect.
        super().setUp()
        snake_case_ : Optional[Any] = (
            "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
            "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
            "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
            "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
            "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
            "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
            "əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
            "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
            "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
            "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
            "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
            "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
            "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
        ).split(" " )
        snake_case_ : Union[str, Any] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
        snake_case_ : List[Any] = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
        snake_case_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + "\n" )
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=20 , _SCREAMING_SNAKE_CASE=5 ) -> List[Any]:
        # Helper: derive a clean (text, ids) pair that round-trips through the tokenizer.
        # NOTE(review): the signature repeats `_SCREAMING_SNAKE_CASE`, a duplicate-argument
        # SyntaxError; upstream these are distinct parameters (tokenizer,
        # with_prefix_space, max_length, min_length).
        snake_case_ : List[str] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )) for i in range(len(_SCREAMING_SNAKE_CASE ) )]
        snake_case_ : Optional[int] = list(filter(lambda _SCREAMING_SNAKE_CASE : [t[0]] == tokenizer.encode(t[1] , do_phonemize=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
        if max_length is not None and len(_SCREAMING_SNAKE_CASE ) > max_length:
            snake_case_ : Any = toks[:max_length]
        if min_length is not None and len(_SCREAMING_SNAKE_CASE ) < min_length and len(_SCREAMING_SNAKE_CASE ) > 0:
            while len(_SCREAMING_SNAKE_CASE ) < min_length:
                snake_case_ : Optional[int] = toks + toks
        # toks_str = [t[1] for t in toks]
        snake_case_ : str = [t[0] for t in toks]
        # Ensure consistency
        snake_case_ : str = tokenizer.decode(_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
        if " " not in output_txt and len(_SCREAMING_SNAKE_CASE ) > 1:
            snake_case_ : Union[str, Any] = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
            )
        if with_prefix_space:
            snake_case_ : Union[str, Any] = " " + output_txt
        snake_case_ : int = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
        return output_txt, output_ids
    def _lowerCAmelCase ( self , **_SCREAMING_SNAKE_CASE ) -> Any:
        # Factory: load the fixture tokenizer written by setUp, merging special tokens.
        kwargs.update(self.special_tokens_map )
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
    def _lowerCAmelCase ( self ) -> int:
        # Test: newly added tokens (single and batched) get ids after the base vocab,
        # and unknown symbols map to <unk> (=3).
        snake_case_ : List[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        # check adding a single token
        tokenizer.add_tokens("xxx" )
        snake_case_ : Union[str, Any] = tokenizer("m xxx ɪ" , do_phonemize=_SCREAMING_SNAKE_CASE ).input_ids
        self.assertEqual(_SCREAMING_SNAKE_CASE , [13, 392, 17] )  # xxx should be last token
        tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
        snake_case_ : Union[str, Any] = tokenizer("m aaa ɪ ccc" , do_phonemize=_SCREAMING_SNAKE_CASE ).input_ids
        self.assertEqual(_SCREAMING_SNAKE_CASE , [13, 393, 17, 395] )  # aaa and ccc should be after xxx and 2 after aaa
        snake_case_ : List[Any] = tokenizer("maɪ c" , do_phonemize=_SCREAMING_SNAKE_CASE ).input_ids
        self.assertEqual(_SCREAMING_SNAKE_CASE , [3, 200] )  # mai should be <unk> (=3)
    def _lowerCAmelCase ( self ) -> Tuple:
        # Test: phonemize() produces the expected en-us phoneme string.
        snake_case_ : Union[str, Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        snake_case_ : Dict = "Hello how are you"
        snake_case_ : str = tokenizer.phonemize(_SCREAMING_SNAKE_CASE , phonemizer_lang="en-us" )
        self.assertEqual(_SCREAMING_SNAKE_CASE , "h ə l oʊ h aʊ ɑːɹ j uː" )
    def _lowerCAmelCase ( self ) -> List[str]:
        # Test: encoding raw text equals encoding its pre-phonemized form.
        snake_case_ : Optional[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        snake_case_ : Union[str, Any] = "Hello how are you"
        snake_case_ : Optional[Any] = tokenizer.phonemize(_SCREAMING_SNAKE_CASE , phonemizer_lang="en-us" )
        self.assertEqual(tokenizer(_SCREAMING_SNAKE_CASE ).input_ids , tokenizer(_SCREAMING_SNAKE_CASE , do_phonemize=_SCREAMING_SNAKE_CASE ).input_ids )
    def _lowerCAmelCase ( self ) -> Dict:
        # Test: decode(encode(text)) round-trips back to the phonemized string.
        snake_case_ : str = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        snake_case_ : Optional[int] = "Hello how are you"
        snake_case_ : Optional[Any] = tokenizer.phonemize(_SCREAMING_SNAKE_CASE , phonemizer_lang="en-us" )
        snake_case_ : int = tokenizer.decode(tokenizer(_SCREAMING_SNAKE_CASE ).input_ids )
        self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def _lowerCAmelCase ( self ) -> str:
        # Test: batch_decode agrees with per-sequence decode (CTC pad-token collapsing).
        snake_case_ : Optional[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        snake_case_ : Union[str, Any] = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        snake_case_ : str = tokenizer.decode(sample_ids[0] )
        snake_case_ : int = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , batch_tokens[0] )
        self.assertEqual(_SCREAMING_SNAKE_CASE , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
    def _lowerCAmelCase ( self ) -> Optional[int]:
        # Test: phonemize() inserts the word delimiter token between words.
        snake_case_ : Union[str, Any] = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        snake_case_ : str = "Hello how are you"
        snake_case_ : Optional[Any] = tokenizer.phonemize(_SCREAMING_SNAKE_CASE , phonemizer_lang="en-us" )
        self.assertEqual(_SCREAMING_SNAKE_CASE , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
    def _lowerCAmelCase ( self ) -> str:
        # Test: encode consistency when a word delimiter token is configured.
        snake_case_ : Optional[Any] = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        snake_case_ : str = "Hello how are you"
        snake_case_ : List[str] = tokenizer.phonemize(_SCREAMING_SNAKE_CASE , phonemizer_lang="en-us" )
        self.assertEqual(tokenizer(_SCREAMING_SNAKE_CASE ).input_ids , tokenizer(_SCREAMING_SNAKE_CASE , do_phonemize=_SCREAMING_SNAKE_CASE ).input_ids )
    def _lowerCAmelCase ( self ) -> Optional[int]:
        # Test: decoding with vs. without filtering of the word delimiter token.
        snake_case_ : Union[str, Any] = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        # fmt: off
        snake_case_ : str = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on
        # decode with word_del_token filter
        snake_case_ : int = tokenizer.decode(sample_ids[0] )
        snake_case_ : Any = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , batch_tokens[0] )
        self.assertEqual(_SCREAMING_SNAKE_CASE , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
        # decode with no word_del_token filter
        snake_case_ : List[str] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_SCREAMING_SNAKE_CASE )
        snake_case_ : Union[str, Any] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , filter_word_delimiter_token=_SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , batch_tokens[0] )
        self.assertEqual(_SCREAMING_SNAKE_CASE , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
    def _lowerCAmelCase ( self ) -> Any:
        # Test: round trip with delimiter filtering enabled matches the phonemized text.
        snake_case_ : int = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        snake_case_ : str = "Hello how are you"
        snake_case_ : int = tokenizer.phonemize(_SCREAMING_SNAKE_CASE , phonemizer_lang="en-us" )
        snake_case_ : int = tokenizer.decode(tokenizer(_SCREAMING_SNAKE_CASE ).input_ids , filter_word_delimiter_token=_SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def _lowerCAmelCase ( self ) -> Optional[int]:
        # Test: round trip without filtering keeps the "|" delimiters in place.
        snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        snake_case_ : List[Any] = "Hello how are you"
        snake_case_ : List[str] = tokenizer.phonemize(_SCREAMING_SNAKE_CASE , phonemizer_lang="en-us" )
        snake_case_ : Any = tokenizer.decode(tokenizer(_SCREAMING_SNAKE_CASE ).input_ids , filter_word_delimiter_token=_SCREAMING_SNAKE_CASE )
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , _SCREAMING_SNAKE_CASE )
    def _lowerCAmelCase ( self ) -> str:
        # Test: per-call phonemizer_lang changes both encoding and decoding output.
        snake_case_ : Any = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=_SCREAMING_SNAKE_CASE )
        snake_case_ : Union[str, Any] = "Hello how are you"
        snake_case_ : Tuple = tokenizer(_SCREAMING_SNAKE_CASE , phonemizer_lang="en-us" ).input_ids
        snake_case_ : Any = tokenizer(_SCREAMING_SNAKE_CASE , phonemizer_lang="fr-fr" ).input_ids
        self.assertNotEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        snake_case_ : Optional[Any] = tokenizer.decode(_SCREAMING_SNAKE_CASE )
        snake_case_ : Union[str, Any] = tokenizer.decode(_SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , "h ə l oʊ h aʊ ɑːɹ j uː" )
        self.assertEqual(_SCREAMING_SNAKE_CASE , "ɛ l o h aʊ a ʁ j u" )
    def _lowerCAmelCase ( self ) -> List[str]:
        # Test: encoding is case-insensitive (phonemizer lowercases internally).
        snake_case_ : Optional[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        snake_case_ : Tuple = "Hello how Are you"
        snake_case_ : List[str] = "hello how are you"
        snake_case_ : Union[str, Any] = tokenizer(_SCREAMING_SNAKE_CASE ).input_ids
        snake_case_ : Optional[int] = tokenizer(_SCREAMING_SNAKE_CASE ).input_ids
        self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def _lowerCAmelCase ( self ) -> int:
        # Test: added tokens and special tokens survive CTC-style batch decoding.
        snake_case_ : Any = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        tokenizer.add_tokens(["!", "?"] )
        tokenizer.add_special_tokens({"cls_token": "$$$"} )
        # fmt: off
        snake_case_ : str = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on
        snake_case_ : Dict = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
    @staticmethod
    def _lowerCAmelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
        # Helper: project one field (e.g. "char", "start_offset") out of a list of offset dicts.
        snake_case_ : Union[str, Any] = [d[key] for d in offsets]
        return retrieved_list
    def _lowerCAmelCase ( self ) -> int:
        # Test: decode(output_char_offsets=True) returns consistent chars and offsets.
        snake_case_ : Dict = self.get_tokenizer(word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        snake_case_ : int = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on
        snake_case_ : str = tokenizer.decode(_SCREAMING_SNAKE_CASE , output_char_offsets=_SCREAMING_SNAKE_CASE , filter_word_delimiter_token=_SCREAMING_SNAKE_CASE )
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys() ) , 2 )
        self.assertTrue("text" in outputs )
        self.assertTrue("char_offsets" in outputs )
        self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
    def _lowerCAmelCase ( self ) -> Tuple:
        # Test: batch_decode with char offsets matches per-sequence decode output.
        snake_case_ : Union[str, Any] = self.get_tokenizer(word_delimiter_token="|" )
        def check_list_tuples_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            # Compare a batched ModelOutput against a list of per-sample outputs.
            self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
            self.assertTrue(isinstance(outputs_list[0] , _SCREAMING_SNAKE_CASE ) )
            # transform list to ModelOutput
            snake_case_ : Union[str, Any] = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
            self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] )
            def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                    [recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for la, la in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
                self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] )
        # fmt: off
        snake_case_ : Dict = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`
        # char
        snake_case_ : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , output_char_offsets=_SCREAMING_SNAKE_CASE )
        snake_case_ : Optional[int] = [tokenizer.decode(_SCREAMING_SNAKE_CASE , output_char_offsets=_SCREAMING_SNAKE_CASE ) for ids in sample_ids]
        check_list_tuples_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
    def _lowerCAmelCase ( self ) -> Dict:
        # Skipped: lower-casing makes the generic common-test inapplicable.
        pass
    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
    def _lowerCAmelCase ( self ) -> Optional[int]:
        # Skipped: phoneme spacing makes the generic common-test inapplicable.
        pass
    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
    def _lowerCAmelCase ( self ) -> Dict:
        # Skipped: encode/decode are intentionally asymmetric for this tokenizer.
        pass
    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
    def _lowerCAmelCase ( self ) -> Optional[int]:
        # Skipped: no max model length to validate.
        pass
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : List[Any] = self.get_tokenizers(do_lower_case=_SCREAMING_SNAKE_CASE )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
snake_case_ : Any = tokenizer.vocab_size
snake_case_ : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
self.assertNotEqual(_SCREAMING_SNAKE_CASE , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
snake_case_ : int = ["aaaaa bbbbbb", "cccccccccdddddddd"]
snake_case_ : List[str] = tokenizer.add_tokens(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = tokenizer.vocab_size
snake_case_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE )
self.assertNotEqual(_SCREAMING_SNAKE_CASE , 0 )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(_SCREAMING_SNAKE_CASE , all_size + len(_SCREAMING_SNAKE_CASE ) )
snake_case_ : List[str] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(len(_SCREAMING_SNAKE_CASE ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
snake_case_ : Optional[int] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
snake_case_ : List[Any] = tokenizer.add_special_tokens(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = tokenizer.vocab_size
snake_case_ : List[Any] = len(_SCREAMING_SNAKE_CASE )
self.assertNotEqual(_SCREAMING_SNAKE_CASE , 0 )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(_SCREAMING_SNAKE_CASE , all_size_a + len(_SCREAMING_SNAKE_CASE ) )
snake_case_ : str = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(len(_SCREAMING_SNAKE_CASE ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
    # Two more disabled common tests: this tokenizer is decode-oriented, so the
    # generic encode round-trip checks do not apply (see skip reasons).
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
    def _lowerCAmelCase ( self ) -> Union[str, Any]:
        pass
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
    def _lowerCAmelCase ( self ) -> int:
        pass
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Optional[Any] = self.get_tokenizers(fast=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
snake_case_ : Tuple = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
snake_case_ : Optional[int] = tokenizer.convert_tokens_to_string(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(output["text"] , _SCREAMING_SNAKE_CASE )
| 568 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCamelCase__ ( lowercase__ ):
    """Lint-style integration checks run over every dataset script under ./datasets.

    Fixes relative to the obfuscated version: all four methods were named ``A_``
    (later defs shadowed earlier ones) while the test bodies called
    ``self._no_encoding_on_file_open`` / ``self._no_print_statements`` — names
    that did not exist.  Helper-local names were also mangled (``regexp.search``
    received the file *path* instead of the file *contents*).  Names are
    restored so the class is internally consistent.
    """

    def _no_encoding_on_file_open(self, filepath: str):
        """Return a regex match if `filepath` contains an `open(...)` call without an explicit encoding/binary mode."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            input_text = input_file.read()
            # search the file *contents*, not the path
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Return the first real `print(` call in `filepath`, ignoring prints inside comments and docstrings."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_all_dataset_scripts(self):
        """Every dataset script must pass an explicit encoding (or binary mode) to open()."""
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}" )

    def test_no_print_statements_on_all_dataset_scripts(self):
        """Dataset scripts must log via datasets.logger/logging instead of print()."""
        dataset_paths = Path("./datasets" )
        dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 679 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _snake_case( lowercase__ , unittest.TestCase ):
    """Roberta tokenizer test suite (slow vs fast/Rust), built on a tiny BPE fixture.

    NOTE(review): this block is symbol-mangled — all class attributes are named
    ``__snake_case`` and all methods ``_UpperCamelCase`` (later defs shadow
    earlier ones), and values are assigned to ``A__`` while later lines read the
    original names (``tokens``, ``encoded``, ...).  Code is left byte-identical;
    restore distinct names upstream before running.
    """
    __snake_case: Optional[int] = RobertaTokenizer
    __snake_case: str = RobertaTokenizerFast
    __snake_case: Optional[Any] = True
    __snake_case: List[Any] = {"cls_token": "<s>"}
    def _UpperCamelCase (self : Optional[Any] ) -> str:
        """Write a tiny byte-level BPE vocab + merges fixture to the temp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        A__ = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        A__ = dict(zip(a , range(len(a ) ) ) )
        A__ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        A__ = {"unk_token": "<unk>"}
        A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(a ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(a ) )
    def _UpperCamelCase (self : Optional[int] , **a : List[str] ) -> int:
        """Build the slow tokenizer from the fixture directory."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **a )
    def _UpperCamelCase (self : Union[str, Any] , **a : Any ) -> Dict:
        """Build the fast (Rust) tokenizer from the fixture directory."""
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **a )
    def _UpperCamelCase (self : Tuple , a : Optional[int] ) -> List[str]:
        """Return the (input_text, output_text) pair used by the common tests."""
        A__ = "lower newer"
        A__ = "lower newer"
        return input_text, output_text
    def _UpperCamelCase (self : Optional[int] ) -> Optional[int]:
        """Tokenize "lower newer" with the slow tokenizer; check tokens and ids."""
        A__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        A__ = "lower newer"
        A__ = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        A__ = tokenizer.tokenize(a )  # , add_prefix_space=True)
        self.assertListEqual(a , a )
        A__ = tokens + [tokenizer.unk_token]
        A__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
    def _UpperCamelCase (self : List[Any] ) -> Optional[int]:
        """Check `encode` produces the reference id sequences for two sentences."""
        A__ = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=a ) , [0, 3_14_14, 2_32, 3_28, 2] )
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=a ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
    @slow
    def _UpperCamelCase (self : Dict ) -> int:
        """`build_inputs_with_special_tokens` must agree with `encode`+specials for single and paired sequences."""
        A__ = self.tokenizer_class.from_pretrained('roberta-base' )
        A__ = tokenizer.encode('sequence builders' , add_special_tokens=a )
        A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=a )
        A__ = tokenizer.encode(
            'sequence builders' , add_special_tokens=a , add_prefix_space=a )
        A__ = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=a , add_prefix_space=a )
        A__ = tokenizer.build_inputs_with_special_tokens(a )
        A__ = tokenizer.build_inputs_with_special_tokens(a , a )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def _UpperCamelCase (self : str ) -> Dict:
        """Check `add_prefix_space` behaviour and spacing around an added <mask> token."""
        A__ = self.get_tokenizer()
        A__ = "Encode this sequence."
        A__ = tokenizer.byte_encoder[" ".encode('utf-8' )[0]]
        # Testing encoder arguments
        A__ = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a )
        A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(a , a )
        A__ = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a )
        A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(a , a )
        tokenizer.add_special_tokens({'bos_token': '<s>'} )
        A__ = tokenizer.encode(a , add_special_tokens=a )
        A__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(a , a )
        # Testing spaces after special tokens
        A__ = "<mask>"
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(a , lstrip=a , rstrip=a )} )  # mask token has a left space
        A__ = tokenizer.convert_tokens_to_ids(a )
        A__ = "Encode <mask> sequence"
        A__ = "Encode <mask>sequence"
        A__ = tokenizer.encode(a )
        A__ = encoded.index(a )
        A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(a , a )
        A__ = tokenizer.encode(a )
        A__ = encoded.index(a )
        A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(a , a )
    def _UpperCamelCase (self : str ) -> List[str]:
        # Intentionally empty — NOTE(review): no skip reason recorded in the original.
        pass
    def _UpperCamelCase (self : List[str] ) -> Optional[int]:
        """Slow and fast tokenizers must agree on a sentence containing <mask>."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                A__ = self.rust_tokenizer_class.from_pretrained(a , **a )
                A__ = self.tokenizer_class.from_pretrained(a , **a )
                A__ = "A, <mask> AllenNLP sentence."
                A__ = tokenizer_r.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
                A__ = tokenizer_p.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
                A__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                A__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(
                    a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    a , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
    def _UpperCamelCase (self : List[str] ) -> Optional[Any]:
        """add_prefix_space/trim_offsets must survive (de)serialization of the fast tokenizer's components."""
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            A__ = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=a , add_prefix_space=a , trim_offsets=a )
            A__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            A__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['add_prefix_space'] , a )
            self.assertEqual(post_processor_state['add_prefix_space'] , a )
            self.assertEqual(post_processor_state['trim_offsets'] , a )
    def _UpperCamelCase (self : Optional[Any] ) -> Optional[Any]:
        """Check offset mappings for every add_prefix_space / trim_offsets combination."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                A__ = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                A__ = f"""{text_of_1_token} {text_of_1_token}"""
                A__ = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , add_prefix_space=a , trim_offsets=a )
                A__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
                A__ = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , add_prefix_space=a , trim_offsets=a )
                A__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
                A__ = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , add_prefix_space=a , trim_offsets=a )
                A__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(a ), len(a ) + 1 + len(a )) , )
                A__ = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , add_prefix_space=a , trim_offsets=a )
                A__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(a ), len(a ) + 1 + len(a )) , )
                A__ = f""" {text}"""
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #    encoding.offset_mapping[1],
                #    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                A__ = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , add_prefix_space=a , trim_offsets=a )
                A__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
                A__ = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , add_prefix_space=a , trim_offsets=a )
                A__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(a ), 1 + len(a ) + 1 + len(a )) , )
                A__ = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , add_prefix_space=a , trim_offsets=a )
                A__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(a ), 1 + len(a ) + 1 + len(a )) , )
| 531 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : str = logging.getLogger(__name__)
class UpperCamelCase__ ( lowercase__ ):
    """BertEncoder variant used by PABEE (Patience-based Early Exit).

    Adds `adaptive_forward`, which runs exactly one encoder layer so the caller
    can decide after each layer whether to exit early.

    Fixes relative to the obfuscated version: the method signature repeated the
    parameter name `snake_case` (a SyntaxError), and the method was named `A_`
    while callers below invoke `self.encoder.adaptive_forward(...)`.
    """

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """Run only encoder layer `current_layer` on `hidden_states` and return its hidden-state output."""
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer] )
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
    """BertModel with PABEE early exit: in inference, stops running layers once
    `patience` consecutive per-layer classifier heads agree on the prediction.

    NOTE(review): symbol mangling throughout — values are assigned to
    `UpperCAmelCase` while later lines read the original names, setters read
    undefined names (`threshold`, `patience`), and the forward signature
    repeats `snake_case=None` (a SyntaxError).  Code left byte-identical.
    """
    def __init__( self , snake_case ):
        """Build the PABEE encoder and zero the inference statistics counters."""
        super().__init__(snake_case )
        UpperCAmelCase : Dict = BertEncoderWithPabee(snake_case )
        self.init_weights()
        UpperCAmelCase : int = 0
        UpperCAmelCase : Dict = 0
        UpperCAmelCase : Optional[int] = 0
        UpperCAmelCase : List[Any] = 0
    def A_ ( self , snake_case ):
        """Set the regression early-exit threshold (NOTE(review): reads undefined `threshold`)."""
        UpperCAmelCase : List[Any] = threshold
    def A_ ( self , snake_case ):
        """Set the patience (NOTE(review): reads undefined `patience`)."""
        UpperCAmelCase : str = patience
    def A_ ( self ):
        """Reset the layer/instance counters used for the speed-up statistics."""
        UpperCAmelCase : Dict = 0
        UpperCAmelCase : List[Any] = 0
    def A_ ( self ):
        """Print average number of layers run per instance and the implied speed-up."""
        UpperCAmelCase : Dict = self.inference_layers_num / self.inference_instances_num
        UpperCAmelCase : List[Any] = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(snake_case )
    @add_start_docstrings_to_model_forward(snake_case )
    def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=False , ):
        """Forward pass: run all layers with per-layer heads in training; in
        inference, exit early once `patience` consecutive heads agree."""
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            UpperCAmelCase : Dict = input_ids.size()
        elif inputs_embeds is not None:
            UpperCAmelCase : Any = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        UpperCAmelCase : Optional[int] = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            UpperCAmelCase : Tuple = torch.ones(snake_case , device=snake_case )
        if token_type_ids is None:
            UpperCAmelCase : List[Any] = torch.zeros(snake_case , dtype=torch.long , device=snake_case )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        UpperCAmelCase : torch.Tensor = self.get_extended_attention_mask(snake_case , snake_case , snake_case )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = encoder_hidden_states.size()
            UpperCAmelCase : List[str] = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                UpperCAmelCase : int = torch.ones(snake_case , device=snake_case )
            UpperCAmelCase : str = self.invert_attention_mask(snake_case )
        else:
            UpperCAmelCase : int = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        UpperCAmelCase : Dict = self.get_head_mask(snake_case , self.config.num_hidden_layers )
        UpperCAmelCase : Tuple = self.embeddings(
            input_ids=snake_case , position_ids=snake_case , token_type_ids=snake_case , inputs_embeds=snake_case )
        UpperCAmelCase : int = embedding_output
        if self.training:
            # Training: run every layer and collect one set of logits per layer.
            UpperCAmelCase : int = []
            for i in range(self.config.num_hidden_layers ):
                UpperCAmelCase : List[Any] = self.encoder.adaptive_forward(
                    snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
                UpperCAmelCase : Dict = self.pooler(snake_case )
                UpperCAmelCase : List[Any] = output_layers[i](output_dropout(snake_case ) )
                res.append(snake_case )
        elif self.patience == 0:  # Use all layers for inference
            UpperCAmelCase : Union[str, Any] = self.encoder(
                snake_case , attention_mask=snake_case , head_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
            UpperCAmelCase : Optional[int] = self.pooler(encoder_outputs[0] )
            UpperCAmelCase : List[str] = [output_layers[self.config.num_hidden_layers - 1](snake_case )]
        else:
            # Patience-based inference: stop once `patience` consecutive layer
            # heads produce the same prediction (or stay within the regression
            # threshold).
            UpperCAmelCase : int = 0
            UpperCAmelCase : Optional[Any] = None
            UpperCAmelCase : Optional[Any] = 0
            for i in range(self.config.num_hidden_layers ):
                calculated_layer_num += 1
                UpperCAmelCase : Tuple = self.encoder.adaptive_forward(
                    snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
                UpperCAmelCase : Any = self.pooler(snake_case )
                UpperCAmelCase : int = output_layers[i](snake_case )
                if regression:
                    UpperCAmelCase : Optional[Any] = logits.detach()
                    if patient_result is not None:
                        UpperCAmelCase : Union[str, Any] = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        UpperCAmelCase : Optional[Any] = 0
                else:
                    UpperCAmelCase : Any = logits.detach().argmax(dim=1 )
                    if patient_result is not None:
                        UpperCAmelCase : Tuple = patient_result.detach().argmax(dim=1 )
                    if (patient_result is not None) and torch.all(labels.eq(snake_case ) ):
                        patient_counter += 1
                    else:
                        UpperCAmelCase : str = 0
                UpperCAmelCase : int = logits
                if patient_counter == self.patience:
                    break
            UpperCAmelCase : int = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
    """PABEE sequence classifier: one linear head per encoder layer; the loss is
    a weighted sum over per-layer losses (later layers weighted more).

    NOTE(review): same symbol mangling as above — assignment targets do not
    match later uses; left byte-identical.
    """
    def __init__( self , snake_case ):
        """Create the PABEE backbone, dropout, and one classifier head per hidden layer."""
        super().__init__(snake_case )
        UpperCAmelCase : Union[str, Any] = config.num_labels
        UpperCAmelCase : Optional[Any] = BertModelWithPabee(snake_case )
        UpperCAmelCase : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
        UpperCAmelCase : Any = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
        self.init_weights()
    @add_start_docstrings_to_model_forward(snake_case )
    def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , ):
        """Forward pass; when labels are given, average per-layer losses weighted by layer index + 1."""
        UpperCAmelCase : int = self.bert(
            input_ids=snake_case , attention_mask=snake_case , token_type_ids=snake_case , position_ids=snake_case , head_mask=snake_case , inputs_embeds=snake_case , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
        UpperCAmelCase : Tuple = (logits[-1],)
        if labels is not None:
            UpperCAmelCase : Optional[int] = None
            UpperCAmelCase : List[Any] = 0
            for ix, logits_item in enumerate(snake_case ):
                if self.num_labels == 1:
                    # We are doing regression
                    UpperCAmelCase : Dict = MSELoss()
                    UpperCAmelCase : Union[str, Any] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
                else:
                    UpperCAmelCase : Optional[int] = CrossEntropyLoss()
                    UpperCAmelCase : Tuple = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
                if total_loss is None:
                    UpperCAmelCase : int = loss
                else:
                    # Weight deeper layers more heavily (ix + 1).
                    total_loss += loss * (ix + 1)
                    total_weights += ix + 1
            UpperCAmelCase : Tuple = (total_loss / total_weights,) + outputs
        return outputs
| 679 | 0 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class _SCREAMING_SNAKE_CASE :
    """Model tester for GPT-NeoX: builds small configs/inputs and runs per-head checks.

    NOTE(review): symbol mangling — `__init__`'s signature repeats `snake_case_`
    (a SyntaxError) and values are assigned to `A` while methods read
    `self.batch_size`, `config`, `input_ids`, ... .  Left byte-identical.
    """
    def __init__( self : str , snake_case_ : List[Any] , snake_case_ : Any=13 , snake_case_ : int=7 , snake_case_ : Tuple=True , snake_case_ : Union[str, Any]=True , snake_case_ : Any=True , snake_case_ : Union[str, Any]=True , snake_case_ : Tuple=99 , snake_case_ : Optional[int]=64 , snake_case_ : Optional[Any]=5 , snake_case_ : Optional[Any]=4 , snake_case_ : Optional[Any]=37 , snake_case_ : Optional[Any]="gelu" , snake_case_ : int=0.1 , snake_case_ : Dict=0.1 , snake_case_ : Optional[Any]=512 , snake_case_ : Any=16 , snake_case_ : int=2 , snake_case_ : Tuple=0.02 , snake_case_ : Optional[int]=3 , snake_case_ : Optional[Any]=4 , snake_case_ : Dict=None , ):
        """Record all sizing/hyper-parameter knobs for the tiny test model."""
        A : Any = parent
        A : str = batch_size
        A : Optional[int] = seq_length
        A : int = is_training
        A : Optional[Any] = use_input_mask
        A : List[Any] = use_token_type_ids
        A : int = use_labels
        A : int = vocab_size
        A : str = hidden_size
        A : Tuple = num_hidden_layers
        A : List[Any] = num_attention_heads
        A : int = intermediate_size
        A : List[Any] = hidden_act
        A : Any = hidden_dropout_prob
        A : Dict = attention_probs_dropout_prob
        A : List[Any] = max_position_embeddings
        A : Optional[int] = type_vocab_size
        A : Union[str, Any] = type_sequence_label_size
        A : int = initializer_range
        A : Optional[Any] = num_labels
        A : Optional[Any] = num_choices
        A : Any = scope
        # last vocab id is reserved as the pad token
        A : Dict = vocab_size - 1
    def _UpperCAmelCase ( self : Union[str, Any] ):
        """Create random input ids / mask / labels plus a config."""
        A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A : int = None
        if self.use_input_mask:
            A : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
        A : Tuple = None
        if self.use_labels:
            A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        A : Optional[Any] = self.get_config()
        return config, input_ids, input_mask, token_labels
    def _UpperCAmelCase ( self : Any ):
        """Return a GPTNeoXConfig built from the tester's knobs."""
        return GPTNeoXConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
    def _UpperCAmelCase ( self : str ):
        """Same as prepare_config_and_inputs but configured as a decoder."""
        A : Union[str, Any] = self.prepare_config_and_inputs()
        A : Union[str, Any] = True
        return config, input_ids, input_mask, token_labels
    def _UpperCAmelCase ( self : Optional[int] , snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : int ):
        """Forward the base model and check the hidden-state shape."""
        A : str = GPTNeoXModel(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        A : Dict = model(snake_case_ , attention_mask=snake_case_ )
        A : Union[str, Any] = model(snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _UpperCAmelCase ( self : Optional[Any] , snake_case_ : Dict , snake_case_ : Any , snake_case_ : List[Any] ):
        """Forward the base model as a decoder and check the hidden-state shape."""
        A : List[str] = True
        A : Any = GPTNeoXModel(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        A : Any = model(snake_case_ , attention_mask=snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _UpperCAmelCase ( self : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : List[Any] ):
        """Check causal-LM logits shape."""
        A : Any = GPTNeoXForCausalLM(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        A : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def _UpperCAmelCase ( self : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : str ):
        """Check question-answering start/end logits shapes."""
        A : Optional[Any] = self.num_labels
        A : int = GPTNeoXForQuestionAnswering(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        A : Any = model(snake_case_ , attention_mask=snake_case_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _UpperCAmelCase ( self : Dict , snake_case_ : str , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Tuple ):
        """Check sequence-classification logits shape."""
        A : int = self.num_labels
        A : int = GPTNeoXForSequenceClassification(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        A : str = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _UpperCAmelCase ( self : Optional[int] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : str , snake_case_ : Union[str, Any] ):
        """Check token-classification logits shape."""
        A : Optional[int] = self.num_labels
        A : List[Any] = GPTNeoXForTokenClassification(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        A : Union[str, Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def _UpperCAmelCase ( self : Any , snake_case_ : Any , snake_case_ : Dict , snake_case_ : Optional[int] ):
        """Check that generation with cached past_key_values matches the uncached forward."""
        A : Any = True
        A : Tuple = GPTNeoXForCausalLM(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        # first forward pass
        A : int = model(snake_case_ , attention_mask=snake_case_ , use_cache=snake_case_ )
        A : Tuple = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        A : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A : int = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        A : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        A : Dict = torch.cat([input_mask, next_mask] , dim=-1 )
        A : Dict = model(snake_case_ , attention_mask=snake_case_ , output_hidden_states=snake_case_ )
        A : Tuple = output_from_no_past["hidden_states"][0]
        A : int = model(
            snake_case_ , attention_mask=snake_case_ , past_key_values=snake_case_ , output_hidden_states=snake_case_ , )["hidden_states"][0]
        # select random slice
        A : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
        A : Any = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
    def _UpperCAmelCase ( self : Tuple ):
        """Return (config, inputs_dict) for the common model tests."""
        A : Optional[int] = self.prepare_config_and_inputs()
        A : List[str] = config_and_inputs
        A : int = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model / generation / pipeline tests for the GPT-NeoX family.

    Fixes from the obfuscated original: every method shared one name (so only the
    last was kept by the class body), all class attributes shared one name (only
    the last assignment survived), and the same base class was listed three times
    (a hard ``TypeError`` at class creation).
    NOTE(review): the mixin base names and ``GPTNeoXConfig`` follow the upstream
    transformers test file — confirm they match this file's imports.
    """

    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # The decoder path must also work when no attention mask is supplied.
        config, input_ids, input_mask = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    """Slow end-to-end greedy-generation check against the Pythia-410M checkpoint.

    Renamed from the obfuscated duplicate class name that shadowed the model-test
    class above; also drops the trailing dataset-artifact junk that broke the
    final line.
    """

    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            # Greedy decoding (do_sample=False) so the expected string is deterministic.
            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact Gaussian Error Linear Unit: x * Phi(x) via erf.

    Name restored from the call site in ``gelu_10`` below; the erf form is the
    original (non-approximate) GELU from https://arxiv.org/abs/1606.08415.
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def _gelu_new(x):
    """Tanh approximation of GELU (the "new" GPT-2 variant).

    Uses 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def mish(x):
    """Mish activation: x * tanh(softplus(x)). Name grounded by the ACT2FN key below."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
    """Fast GELU approximation.

    The obfuscated original reused one name for both constants; they are two
    distinct coefficients (0.044715 inner, sqrt(2/pi) ~= 0.7978845608 outer).
    """
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def gelu_10(x):
    """GELU with outputs clipped to [-10, 10] (useful for quantization ranges)."""
    return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
    """Gated Linear Unit: split x in two halves along `axis`, gate the first with sigmoid of the second."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
# TF >= 2.4 ships a native (and faster) Keras GELU; fall back to the local
# implementations otherwise. The assigned names `gelu` / `gelu_new` are the
# ones the ACT2FN mapping below refers to.
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        # `approximate=True` selects the tanh approximation, matching _gelu_new.
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
# String name -> activation callable; consumed by get_tf_activation below
# (which already spelled "ACT2FN" inside its error message).
ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def get_tf_activation(activation_string):
    """Return the activation callable registered under `activation_string`.

    Raises:
        KeyError: if the name is not present in the ACT2FN mapping.
    """
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
| 679 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _SCREAMING_SNAKE_CASE(lowerCAmelCase__):
    """Möbius function mu(n): 0 when n has a squared prime factor, otherwise
    +1 / -1 for an even / odd number of distinct prime factors."""
    factors = prime_factors(lowerCAmelCase__)
    if not is_square_free(factors):
        return 0
    return 1 if len(factors) % 2 == 0 else -1
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 364 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter in direct form I.

    Fixes from the obfuscated original: the coefficient and history updates were
    assigned to throwaway locals instead of ``self.*`` / slice targets, so the
    filter never stored coefficients or advanced its state.  Also accepts
    ``order`` coefficients (a0 implied as 1.0), which the original off-by-one
    check (``< order`` instead of ``< order + 1``) made unreachable.
    """

    def __init__(self, order):
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs, b_coeffs):
        """Install feedback (a) and feedforward (b) coefficients.

        ``a_coeffs`` may omit a0, in which case 1.0 is assumed.
        Raises ValueError when either list does not end up with order+1 entries.
        """
        if len(a_coeffs) < self.order + 1:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample):
        """Feed one sample through the filter and return the filtered sample."""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # Shift the delay lines and record the newest input/output.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
| 679 | 0 |
def fibonacci(n):
    """Return the n-th Fibonacci number with F(2) == 1, F(3) == 2, ...

    Kept original quirk: returns 0 for n == 1 or non-int input.
    Name restored from the internal call sites that already used `fibonacci`.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n):
    """Return the index of the first Fibonacci number with `n` digits.

    Walks the sequence incrementally (O(index) instead of recomputing the whole
    sequence for every probe, which was accidentally quadratic).
    """
    digits = 0
    index = 2
    prev, curr = 1, 1  # F(index - 1), F(index)
    while digits < n:
        index += 1
        prev, curr = curr, prev + curr
        digits = len(str(curr))
    return index


def solution(n=1000):
    """Project Euler 25: index of the first Fibonacci term with `n` digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 606 |
'''simple docstring'''
import argparse
from collections import defaultdict
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = F"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Tuple = f.readlines()
UpperCAmelCase : Tuple = F"class {class_name}("
UpperCAmelCase : str = F"{4 * ' '}def {test_name}("
UpperCAmelCase : Dict = F"{8 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Tuple = F"{16 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : List[str] = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Tuple = 0
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = []
for line in lines:
if line.startswith(__magic_name__ ):
UpperCAmelCase : int = True
elif in_class and line.startswith(__magic_name__ ):
UpperCAmelCase : Dict = True
elif in_class and in_func and (line.startswith(__magic_name__ ) or line.startswith(__magic_name__ )):
UpperCAmelCase : List[str] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"{spaces * ' '}{correct_line}" )
UpperCAmelCase : List[str] = False
else:
new_lines.append(__magic_name__ )
with open(__magic_name__ , "w" ) as f:
for line in new_lines:
f.write(__magic_name__ )
def main(correct_filename, fail_filename=None):
    """Apply every correction listed in ``correct_filename``.

    Each line has the form ``file;class;test;correct_line``.  When
    ``fail_filename`` is given, only entries whose ``file::class::test`` id
    appears in that file are rewritten.  Name restored from the __main__ guard.
    """
    if fail_filename is not None:
        with open(fail_filename, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    # Fix: the parsed namespace was assigned to a throwaway name while the next
    # line read `args`.
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
| 679 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase = False
class __UpperCamelCase ( unittest.TestCase ):
    # Intentionally empty placeholder for the fast (CPU) test suite.
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests for ``VersatileDiffusionPipeline``.

    Restores distinct method names (the obfuscated original gave every method
    the same name, keeping only the last) and the destroyed local assignments.
    """

    def tearDown(self):
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 259 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    """Binary-tree node holding a coin count.

    Field names restored from their use sites (`node.data`, `node.left`,
    `node.right`) — the obfuscated originals all collided on one name.
    """

    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
# Result type used by the recursion below; the assignment target is grounded
# by the `CoinsDistribResult(...)` constructor calls inside the function.
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def lowercase(root):
    """Return the minimum number of coin moves so every node ends with one coin.

    Each move transfers a single coin along one edge.  Raises ValueError when
    the total coin count differs from the node count (no solution exists).
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation: for each subtree, the moves needed inside it and the
    # surplus (positive) / deficit (negative encoded via 1 - excess) of coins.
    def get_distrib(node) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result, excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
    # Execute this module's doctests when run as a script.
    import doctest

    doctest.testmod()
| 679 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__magic_name__ : Tuple = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    """inspect_dataset should copy the dataset script into tmp_path (pytest fixture)."""
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    """inspect_metric should copy the metric script into tmp_path (pytest fixture)."""
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    """Config info should report the requested config name and its splits."""
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    """Omitting the config for a multi-config dataset must raise."""
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    """The expected config name must be among the discovered config names."""
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    """All configs must be listed, and the first config's splits must match."""
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    """A specific config's info must expose its name and splits."""
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    """Split-name lookup without a config on a multi-config dataset must raise."""
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 615 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
# Names restored from their use sites: the tokenizer class below references
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, and `logger` in its mask_token getter.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    """Fast LED tokenizer (byte-level BPE, backed by the `tokenizers` library).

    Restores the base class from the file's `PreTrainedTokenizerFast` import,
    distinct class-attribute names (the obfuscated originals all collided), and
    the destroyed ``self.*`` / dict assignments in ``__init__`` and ``_pad``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the byte-level pre-tokenizer if `add_prefix_space` differs
        # from the serialized backend state.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_component = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_component)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        """The mask token, or None (with a logged error) if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``<s> A </s>`` for a single sequence, ``<s> A </s></s> B </s>`` for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """LED does not use token type ids: return a zero list of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 679 | 0 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __magic_name__ ( lowercase__ ):
def __init__( self , *__snake_case , __snake_case=None , __snake_case=None , **__snake_case ) -> int:
'''simple docstring'''
super().__init__(*__snake_case , **__snake_case )
__a =eval_examples
__a =post_process_function
def __magic_name__ ( self , __snake_case = None , __snake_case=None , __snake_case = None , __snake_case = "eval" , **__snake_case , ) -> Optional[int]:
'''simple docstring'''
__a =gen_kwargs.copy()
__a =(
gen_kwargs["max_length"] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length
)
__a =(
gen_kwargs["num_beams"] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams
)
__a =gen_kwargs
__a =self.eval_dataset if eval_dataset is None else eval_dataset
__a =self.get_eval_dataloader(__snake_case )
__a =self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__a =self.compute_metrics
__a =None
__a =time.time()
__a =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__a =eval_loop(
__snake_case , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__snake_case , metric_key_prefix=__snake_case , )
finally:
__a =compute_metrics
__a =self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
__snake_case , __snake_case , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__a =self.post_process_function(__snake_case , __snake_case , __snake_case )
__a =self.compute_metrics(__snake_case )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
__a =metrics.pop(__snake_case )
metrics.update(output.metrics )
else:
__a =output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__snake_case )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__a =self.callback_handler.on_evaluate(self.args , self.state , self.control , __snake_case )
return metrics
    def __magic_name__ ( self , __snake_case , __snake_case , __snake_case=None , __snake_case = "test" , **__snake_case ) -> Union[str, Any]:
        """Run prediction on a test dataset and post-process/prefix the metrics.

        NOTE(review): obfuscation has collapsed all distinct local bindings into
        `__a`, so names read later (gen_kwargs, eval_loop, compute_metrics,
        output, total_batch_size, start_time, metrics, predictions, ...) are
        undefined at runtime — the originals were presumably bound by these
        `__a =` lines; confirm against the upstream trainer implementation.
        """
        # Copy caller kwargs so generation settings are not mutated in place.
        __a =gen_kwargs.copy()
        __a =self.get_test_dataloader(__snake_case )
        # Temporarily disable metric computation, we will do it in the loop here.
        __a =self.compute_metrics
        __a =None
        __a =time.time()
        # Legacy prediction loop kept behind a flag for backward compatibility.
        __a =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            __a =eval_loop(
                __snake_case , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__snake_case , metric_key_prefix=__snake_case , )
        finally:
            # Always restore the metric function, even if the loop raised.
            __a =compute_metrics
        __a =self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            # Exclude one-off JIT compilation from throughput timing.
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                __snake_case , __snake_case , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        __a =self.post_process_function(__snake_case , __snake_case , __snake_case , 'predict' )
        __a =self.compute_metrics(__snake_case )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f'{metric_key_prefix}_' ):
                __a =metrics.pop(__snake_case )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__snake_case )
| 242 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowercase ( __magic_name__="" ):
    """Return a unique file path inside a freshly created temporary directory.

    Args:
        __magic_name__: optional filename suffix (e.g. ``".wav"``), appended
            verbatim to the generated basename.

    Returns:
        Absolute path string ``<tmpdir>/<uuid4><suffix>``; the file itself is
        not created, only the directory.
    """
    directory = tempfile.mkdtemp()
    # uuid.uuid4() (the original code called the nonexistent `uuid.uuida`)
    # guarantees a collision-free basename per call.
    return os.path.join(directory, str(uuid.uuid4()) + __magic_name__)
@require_soundfile
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Round-trip tests for AgentAudio: tensor -> serialized file -> tensor.

    NOTE(review): the obfuscated bodies reference `snake_case` and
    `agent_type`, which are never defined — each `UpperCAmelCase : ... =`
    line presumably bound one of those names originally; confirm upstream.
    `torch.floataa` also looks like a mangled dtype (likely float32).
    """
    def A_ ( self ):
        """Serialize a random waveform tensor and check the file matches it."""
        # Values centered around 0 to resemble an audio signal.
        UpperCAmelCase : Any = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
        UpperCAmelCase : int = AgentAudio(snake_case )
        UpperCAmelCase : str = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(snake_case , agent_type.to_raw() , atol=1e-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(snake_case ) )
        # Ensure that the file contains the same value as the original tensor
        UpperCAmelCase , UpperCAmelCase : str = sf.read(snake_case )
        self.assertTrue(torch.allclose(snake_case , torch.tensor(snake_case ) , atol=1e-4 ) )
    def A_ ( self ):
        """Build AgentAudio from an existing .wav file and check it is reused."""
        UpperCAmelCase : Union[str, Any] = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
        UpperCAmelCase : Any = get_new_path(suffix=".wav" )
        # 16 kHz sample rate, the conventional speech sampling rate.
        sf.write(snake_case , snake_case , 1_6_0_0_0 )
        UpperCAmelCase : Optional[Any] = AgentAudio(snake_case )
        self.assertTrue(torch.allclose(snake_case , agent_type.to_raw() , atol=1e-4 ) )
        self.assertEqual(agent_type.to_string() , snake_case )
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for AgentImage built from a tensor and from image files.

    NOTE(review): `snake_case`, `agent_type`, `path` and `image` are
    referenced but never defined — the obfuscated `UpperCAmelCase : ... =`
    lines presumably bound them originally; confirm upstream.
    """
    def A_ ( self ):
        """Serialize a random HxWxC uint tensor image and check round-trip."""
        UpperCAmelCase : Optional[Any] = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3) )
        UpperCAmelCase : Tuple = AgentImage(snake_case )
        UpperCAmelCase : Tuple = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(snake_case , agent_type._tensor , atol=1e-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(snake_case ) )
    def A_ ( self ):
        """Loading from a PIL.Image keeps pointing at the original fixture file."""
        UpperCAmelCase : Optional[Any] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        UpperCAmelCase : Any = Image.open(snake_case )
        UpperCAmelCase : List[str] = AgentImage(snake_case )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(snake_case ) )
    def A_ ( self ):
        """Loading from a raw path serializes to a new file, not the fixture."""
        UpperCAmelCase : Union[str, Any] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        UpperCAmelCase : Dict = Image.open(snake_case )
        UpperCAmelCase : int = AgentImage(snake_case )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(snake_case ) )
class UpperCamelCase__ ( unittest.TestCase ):
    """AgentText should behave like the plain string it wraps.

    NOTE(review): `snake_case` and `agent_type` are undefined here — the
    obfuscated `UpperCAmelCase : ... =` lines presumably bound them; confirm.
    """
    def A_ ( self ):
        """to_string()/to_raw() of AgentText both equal the source string."""
        UpperCAmelCase : Any = "Hey!"
        UpperCAmelCase : Tuple = AgentText(snake_case )
        self.assertEqual(snake_case , agent_type.to_string() )
        self.assertEqual(snake_case , agent_type.to_raw() )
        self.assertEqual(snake_case , snake_case )
| 679 | 0 |
from __future__ import annotations
from typing import Any
class _SCREAMING_SNAKE_CASE :
    """Borůvka's minimum-spanning-tree algorithm over an edge list.

    NOTE(review): obfuscation replaced distinct targets (`self.m_num_of_nodes`,
    `self.m_edges`, `self.m_component`, locals like `u, v, w`,
    `minimum_weight_edge`, ...) with bare `SCREAMING_SNAKE_CASE =` assignments,
    so the results are discarded and later reads are undefined. Several method
    signatures also repeat the parameter name `__lowerCamelCase`, which is a
    SyntaxError — confirm against the upstream Borůvka implementation.
    """
    def __init__( self : Optional[Any] , __lowerCamelCase : str ):
        # Intended: node count, edge list [u, v, weight], and the
        # node -> component mapping used by union-find below.
        SCREAMING_SNAKE_CASE = num_of_nodes
        SCREAMING_SNAKE_CASE = []
        SCREAMING_SNAKE_CASE = {}
    def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ):
        # Register an undirected weighted edge.
        self.m_edges.append([u_node, v_node, weight] )
    def _snake_case ( self : Optional[int] , __lowerCamelCase : Dict ):
        # Find the component root of a node (recursive path walk).
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def _snake_case ( self : List[Any] , __lowerCamelCase : Dict ):
        # Refresh the component table once a node no longer points at itself.
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                SCREAMING_SNAKE_CASE = self.find_component(__lowerCamelCase )
    def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any] ):
        # Union by size: attach the smaller component under the larger one.
        if component_size[u_node] <= component_size[v_node]:
            SCREAMING_SNAKE_CASE = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(__lowerCamelCase )
        elif component_size[u_node] >= component_size[v_node]:
            SCREAMING_SNAKE_CASE = self.find_component(__lowerCamelCase )
            component_size[u_node] += component_size[v_node]
            self.set_component(__lowerCamelCase )
    def _snake_case ( self : List[Any] ):
        # Main Borůvka loop: repeatedly add each component's cheapest
        # outgoing edge until a single component remains.
        SCREAMING_SNAKE_CASE = []
        SCREAMING_SNAKE_CASE = 0
        SCREAMING_SNAKE_CASE = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        SCREAMING_SNAKE_CASE = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                SCREAMING_SNAKE_CASE = edge
                SCREAMING_SNAKE_CASE = self.m_component[u]
                SCREAMING_SNAKE_CASE = self.m_component[v]
                if u_component != v_component:
                    # Track the lightest edge leaving each component.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            SCREAMING_SNAKE_CASE = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(__lowerCamelCase , __lowerCamelCase ):
                    SCREAMING_SNAKE_CASE = edge
                    SCREAMING_SNAKE_CASE = self.m_component[u]
                    SCREAMING_SNAKE_CASE = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
                        num_of_components -= 1
            # Reset candidate edges for the next round.
            SCREAMING_SNAKE_CASE = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def __a ( ):
pass
if __name__ == "__main__":
    # Running this module directly executes any doctests defined above.
    import doctest
    doctest.testmod()
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
    """Convert a TensorFlow "Token Dropping" BERT checkpoint to PyTorch.

    NOTE(review): the three positional parameters share the obfuscated name
    `__magic_name__` (a SyntaxError) — per the argparse block below they were
    presumably (tf_checkpoint_path, config_path, pytorch_dump_path). The
    `UpperCAmelCase : ... =` lines also collapse all distinct targets, so the
    loaded tensors are never actually assigned onto the model's parameters;
    confirm against the upstream conversion script.
    """
    def get_masked_lm_array(__magic_name__ ):
        # Load one masked-LM head variable from the TF checkpoint;
        # kernels are transposed to match PyTorch's (out, in) layout.
        UpperCAmelCase : Tuple = F"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        UpperCAmelCase : List[str] = tf.train.load_variable(__magic_name__ , __magic_name__ )
        if "kernel" in name:
            UpperCAmelCase : str = array.transpose()
        return torch.from_numpy(__magic_name__ )
    def get_encoder_array(__magic_name__ ):
        # Load one encoder-level (non per-layer) variable.
        UpperCAmelCase : List[Any] = F"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        UpperCAmelCase : Optional[Any] = tf.train.load_variable(__magic_name__ , __magic_name__ )
        if "kernel" in name:
            UpperCAmelCase : str = array.transpose()
        return torch.from_numpy(__magic_name__ )
    def get_encoder_layer_array(__magic_name__ , __magic_name__ ):
        # Load a per-transformer-layer variable by layer index and name.
        UpperCAmelCase : Union[str, Any] = F"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        UpperCAmelCase : int = tf.train.load_variable(__magic_name__ , __magic_name__ )
        if "kernel" in name:
            UpperCAmelCase : Optional[int] = array.transpose()
        return torch.from_numpy(__magic_name__ )
    def get_encoder_attention_layer_array(__magic_name__ , __magic_name__ , __magic_name__ ):
        # Attention weights are stored per-head in TF; reshape to the
        # flat PyTorch shape before the optional transpose.
        UpperCAmelCase : Tuple = F"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        UpperCAmelCase : List[str] = tf.train.load_variable(__magic_name__ , __magic_name__ )
        UpperCAmelCase : int = array.reshape(__magic_name__ )
        if "kernel" in name:
            UpperCAmelCase : Optional[Any] = array.transpose()
        return torch.from_numpy(__magic_name__ )
    print(F"Loading model based on config from {config_path}..." )
    UpperCAmelCase : Optional[Any] = BertConfig.from_json_file(__magic_name__ )
    UpperCAmelCase : Optional[Any] = BertForMaskedLM(__magic_name__ )
    # Layers
    for layer_index in range(0 , config.num_hidden_layers ):
        UpperCAmelCase : BertLayer = model.bert.encoder.layer[layer_index]
        # Self-attention
        UpperCAmelCase : BertSelfAttention = layer.attention.self
        UpperCAmelCase : List[Any] = get_encoder_attention_layer_array(
            __magic_name__ , "_query_dense/kernel" , self_attn.query.weight.data.shape )
        UpperCAmelCase : Tuple = get_encoder_attention_layer_array(
            __magic_name__ , "_query_dense/bias" , self_attn.query.bias.data.shape )
        UpperCAmelCase : int = get_encoder_attention_layer_array(
            __magic_name__ , "_key_dense/kernel" , self_attn.key.weight.data.shape )
        UpperCAmelCase : Optional[int] = get_encoder_attention_layer_array(
            __magic_name__ , "_key_dense/bias" , self_attn.key.bias.data.shape )
        UpperCAmelCase : Tuple = get_encoder_attention_layer_array(
            __magic_name__ , "_value_dense/kernel" , self_attn.value.weight.data.shape )
        UpperCAmelCase : str = get_encoder_attention_layer_array(
            __magic_name__ , "_value_dense/bias" , self_attn.value.bias.data.shape )
        # Self-attention Output
        UpperCAmelCase : BertSelfOutput = layer.attention.output
        UpperCAmelCase : str = get_encoder_attention_layer_array(
            __magic_name__ , "_output_dense/kernel" , self_output.dense.weight.data.shape )
        UpperCAmelCase : Union[str, Any] = get_encoder_attention_layer_array(
            __magic_name__ , "_output_dense/bias" , self_output.dense.bias.data.shape )
        UpperCAmelCase : str = get_encoder_layer_array(__magic_name__ , "_attention_layer_norm/gamma" )
        UpperCAmelCase : List[str] = get_encoder_layer_array(__magic_name__ , "_attention_layer_norm/beta" )
        # Intermediate
        UpperCAmelCase : BertIntermediate = layer.intermediate
        UpperCAmelCase : Dict = get_encoder_layer_array(__magic_name__ , "_intermediate_dense/kernel" )
        UpperCAmelCase : Tuple = get_encoder_layer_array(__magic_name__ , "_intermediate_dense/bias" )
        # Output
        UpperCAmelCase : BertOutput = layer.output
        UpperCAmelCase : Optional[Any] = get_encoder_layer_array(__magic_name__ , "_output_dense/kernel" )
        UpperCAmelCase : Optional[Any] = get_encoder_layer_array(__magic_name__ , "_output_dense/bias" )
        UpperCAmelCase : List[str] = get_encoder_layer_array(__magic_name__ , "_output_layer_norm/gamma" )
        UpperCAmelCase : Any = get_encoder_layer_array(__magic_name__ , "_output_layer_norm/beta" )
    # Embeddings
    UpperCAmelCase : int = get_encoder_array("_position_embedding_layer/embeddings" )
    UpperCAmelCase : str = get_encoder_array("_type_embedding_layer/embeddings" )
    UpperCAmelCase : Optional[Any] = get_encoder_array("_embedding_norm_layer/gamma" )
    UpperCAmelCase : Any = get_encoder_array("_embedding_norm_layer/beta" )
    # LM Head
    UpperCAmelCase : str = model.cls.predictions.transform
    UpperCAmelCase : List[Any] = get_masked_lm_array("dense/kernel" )
    UpperCAmelCase : List[Any] = get_masked_lm_array("dense/bias" )
    UpperCAmelCase : Optional[Any] = get_masked_lm_array("layer_norm/gamma" )
    UpperCAmelCase : Union[str, Any] = get_masked_lm_array("layer_norm/beta" )
    UpperCAmelCase : Optional[Any] = get_masked_lm_array("embedding_table" )
    # Pooling
    UpperCAmelCase : str = BertPooler(config=__magic_name__ )
    UpperCAmelCase : BertPooler = get_encoder_array("_pooler_layer/kernel" )
    UpperCAmelCase : BertPooler = get_encoder_array("_pooler_layer/bias" )
    # Export final model
    model.save_pretrained(__magic_name__ )
    # Integration test - should load without any errors ;)
    UpperCAmelCase : Optional[int] = BertForMaskedLM.from_pretrained(__magic_name__ )
    print(new_model.eval() )
    print("Model conversion was done sucessfully!" )
if __name__ == "__main__":
    # CLI entry point for the checkpoint conversion above.
    # NOTE(review): the parsed parser/args are bound to the obfuscated name
    # `a` while `parser`, `args` and `convert_checkpoint_to_pytorch` are
    # referenced below — the conversion function above is named `lowercase`
    # here; confirm the intended bindings.
    a : Tuple = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    a : Any = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 679 | 0 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: submodule name -> public names it exports.
# (The original obfuscated file bound this dict, the torch-only list and the
# final lazy module all to one name, leaving `_import_structure` undefined.)
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

# The modeling objects require torch; only register them when it is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the module
    # is replaced by a lazy proxy below.
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )
else:
    import sys

    # Swap this module for a proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 542 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
a : str = "src/transformers"
# Matches is_xxx_available()
a : Union[str, Any] = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
a : int = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a : Any = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
a : Dict = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
a : Any = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a : List[str] = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
a : Union[str, Any] = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
a : List[str] = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
a : Any = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
a : Union[str, Any] = re.compile(R"^\s*try:")
# Catches a line with else:
a : Tuple = re.compile(R"^\s*else:")
def lowercase ( __magic_name__ ):
    """Extract the backend combination tested by an `if not is_xxx_available()` line.

    Args:
        __magic_name__: one source line from an ``__init__.py``.

    Returns:
        ``None`` when the line is not a backend guard, otherwise the sorted
        backend names joined with ``"_and_"`` (e.g. ``"torch_and_vision"``).
    """
    if _re_test_backend.search(__magic_name__) is None:
        return None
    # The original bound this list to an obfuscated name and then joined the
    # raw line instead of the backends — restored to join the sorted backends.
    backends = [b[0] for b in _re_backend.findall(__magic_name__)]
    backends.sort()
    return "_and_".join(backends)
def lowercase ( __magic_name__ ):
    """Parse an ``__init__.py`` and return its `_import_structure` objects and
    TYPE_CHECKING objects, each as a dict keyed by backend name (or "none").

    NOTE(review): obfuscation collapsed distinct locals (`lines`,
    `line_index`, `line`, `objects`, `backend`, `single_line_import_search`,
    `import_dict_objects`, `type_hint_objects`, ...) into `UpperCAmelCase`,
    so most names read below are undefined — confirm against the upstream
    `check_inits.py` before relying on this function.
    """
    with open(__magic_name__ , "r" , encoding="utf-8" , newline="\n" ) as f:
        UpperCAmelCase : str = f.readlines()
    UpperCAmelCase : Optional[int] = 0
    # Skip ahead to the start of the lazy import structure.
    while line_index < len(__magic_name__ ) and not lines[line_index].startswith("_import_structure = {" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(__magic_name__ ):
        return None
    # First grab the objects without a specific backend in _import_structure
    UpperCAmelCase : str = []
    while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
        UpperCAmelCase : List[str] = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(__magic_name__ ):
            UpperCAmelCase : int = _re_one_line_import_struct.search(__magic_name__ ).groups()[0]
            UpperCAmelCase : Any = re.findall("\[([^\]]+)\]" , __magic_name__ )
            for imp in imports:
                # Strip the surrounding quotes from each listed object.
                objects.extend([obj[1:-1] for obj in imp.split(", " )] )
            line_index += 1
            continue
        UpperCAmelCase : Optional[int] = _re_import_struct_key_value.search(__magic_name__ )
        if single_line_import_search is not None:
            UpperCAmelCase : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(__magic_name__ ) > 0]
            objects.extend(__magic_name__ )
        elif line.startswith(" " * 8 + "\"" ):
            objects.append(line[9:-3] )
        line_index += 1
    UpperCAmelCase : Dict = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        UpperCAmelCase : str = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCAmelCase : Optional[Any] = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            UpperCAmelCase : List[Any] = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
                UpperCAmelCase : List[str] = lines[line_index]
                if _re_import_struct_add_one.search(__magic_name__ ) is not None:
                    objects.append(_re_import_struct_add_one.search(__magic_name__ ).groups()[0] )
                elif _re_import_struct_add_many.search(__magic_name__ ) is not None:
                    UpperCAmelCase : List[str] = _re_import_struct_add_many.search(__magic_name__ ).groups()[0].split(", " )
                    UpperCAmelCase : int = [obj[1:-1] for obj in imports if len(__magic_name__ ) > 0]
                    objects.extend(__magic_name__ )
                elif _re_between_brackets.search(__magic_name__ ) is not None:
                    UpperCAmelCase : Optional[Any] = _re_between_brackets.search(__magic_name__ ).groups()[0].split(", " )
                    UpperCAmelCase : Optional[int] = [obj[1:-1] for obj in imports if len(__magic_name__ ) > 0]
                    objects.extend(__magic_name__ )
                elif _re_quote_object.search(__magic_name__ ) is not None:
                    objects.append(_re_quote_object.search(__magic_name__ ).groups()[0] )
                elif line.startswith(" " * 8 + "\"" ):
                    objects.append(line[9:-3] )
                elif line.startswith(" " * 12 + "\"" ):
                    objects.append(line[13:-3] )
                line_index += 1
            UpperCAmelCase : Optional[int] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    UpperCAmelCase : List[str] = []
    while (
        line_index < len(__magic_name__ )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("else" )
    ):
        UpperCAmelCase : int = lines[line_index]
        UpperCAmelCase : Tuple = _re_import.search(__magic_name__ )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", " ) )
        elif line.startswith(" " * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    UpperCAmelCase : Optional[Any] = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(__magic_name__ ):
        # If the line is an if is_backend_available, we grab all objects associated.
        UpperCAmelCase : Optional[int] = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCAmelCase : List[Any] = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            UpperCAmelCase : List[str] = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
                UpperCAmelCase : str = lines[line_index]
                UpperCAmelCase : Tuple = _re_import.search(__magic_name__ )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            UpperCAmelCase : Dict = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def lowercase ( import_dict_objects , type_hint_objects ):
    """Compare the objects declared in `_import_structure` with those imported
    under TYPE_CHECKING and report every mismatch.

    Args:
        import_dict_objects: dict backend -> list of object names from
            `_import_structure`. (The original signature repeated the
            parameter name `__magic_name__`, a SyntaxError — restored.)
        type_hint_objects: dict backend -> list of object names from the
            TYPE_CHECKING block.

    Returns:
        List of human-readable error strings; empty when both sides agree.
    """
    def find_duplicates(seq):
        # Names registered more than once within the same backend bucket.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        # Compare as sets so ordering differences are not reported as errors.
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def lowercase ( ):
    """Walk the source tree and validate every ``__init__.py``.

    NOTE(review): `os.walk(__magic_name__)` references an undefined name
    (presumably `PATH_TO_TRANSFORMERS`), and `parse_init`/`analyze_results`/
    `failures`/`objects`/`errors`/`fname` are likewise unbound because the
    obfuscated assignments collapsed them — confirm upstream.
    """
    UpperCAmelCase : int = []
    for root, _, files in os.walk(__magic_name__ ):
        if "__init__.py" in files:
            UpperCAmelCase : Dict = os.path.join(__magic_name__ , "__init__.py" )
            UpperCAmelCase : Optional[Any] = parse_init(__magic_name__ )
            if objects is not None:
                UpperCAmelCase : int = analyze_results(*__magic_name__ )
                if len(__magic_name__ ) > 0:
                    # Only the first error is surfaced per failing init.
                    UpperCAmelCase : Union[str, Any] = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(__magic_name__ ) )
    if len(__magic_name__ ) > 0:
        raise ValueError("\n\n".join(__magic_name__ ) )
def lowercase ( ):
    """Collect the dotted names of all transformers submodules on disk.

    NOTE(review): `os.walk(__magic_name__)` references an undefined name
    (presumably `PATH_TO_TRANSFORMERS`); `submodules`, `short_path` and
    `submodule` are also unbound due to obfuscated assignments — confirm.
    """
    UpperCAmelCase : Union[str, Any] = []
    for path, directories, files in os.walk(__magic_name__ ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_" ):
                directories.remove(__magic_name__ )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(__magic_name__ ) / folder).glob("*.py" ) ) ) == 0:
                continue
            UpperCAmelCase : Any = str((Path(__magic_name__ ) / folder).relative_to(__magic_name__ ) )
            UpperCAmelCase : Optional[Any] = short_path.replace(os.path.sep , "." )
            submodules.append(__magic_name__ )
        for fname in files:
            if fname == "__init__.py":
                continue
            UpperCAmelCase : List[str] = str((Path(__magic_name__ ) / fname).relative_to(__magic_name__ ) )
            UpperCAmelCase : str = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
            # Only top-level modules (no dots) are registered here.
            if len(submodule.split("." ) ) == 1:
                submodules.append(__magic_name__ )
    return submodules
# Submodules deliberately exempt from registration in the main init
# (conversion scripts and internal utilities, not public API).
# The obfuscated original bound this list to `a` (annotated `str`, which it
# is not) while the checker below reads `IGNORE_SUBMODULES` — restored.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def lowercase ( ):
    """Verify every on-disk submodule is registered in the main lazy init.

    NOTE(review): `__magic_name__`, `spec`, `transformers`, `PATH_TO_TRANSFORMERS`,
    `get_transformers_submodules`, `IGNORE_SUBMODULES`, `module_not_registered`
    and `list_of_modules` are unbound in this obfuscated version — the
    `UpperCAmelCase : ... =` lines presumably bound several of them; confirm.
    """
    # Import the package from its source location rather than site-packages.
    UpperCAmelCase : str = importlib.util.spec_from_file_location(
        "transformers" , os.path.join(__magic_name__ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    UpperCAmelCase : Optional[int] = spec.loader.load_module()
    UpperCAmelCase : Dict = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(__magic_name__ ) > 0:
        UpperCAmelCase : List[str] = "\n".join(F"- {module}" for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            F"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
    # NOTE(review): both checker functions above are named `lowercase` in this
    # obfuscated file, so `check_all_inits`/`check_submodules` are undefined
    # here — confirm the intended names.
    check_all_inits()
    check_submodules()
| 679 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Sample inputs for the __main__ demo below (an odd-ish and an even-ish set).
# NOTE(review): both tuples bind the same name `lowerCamelCase`, so the second
# clobbers the first; the demo reads `test_data_odd`/`test_data_even`, which
# these presumably were — confirm.
lowerCamelCase : Union[str, Any] = (3, 9, -11, 0, 7, 5, 1, -1)
lowerCamelCase : Tuple = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class _UpperCamelCase :
snake_case_ = 42
snake_case_ = 42
class _UpperCamelCase :
    """A sorted singly linked list built from an iterable.

    NOTE(review): this class shadows the Node dataclass above (both are named
    `_UpperCamelCase`), `Node` itself is unbound, and the `__lowerCAmelCase =`
    lines discard what should be `self.head`/`node` assignments — confirm
    against the upstream sorted-linked-list implementation.
    """
    def __init__( self , __UpperCamelCase )-> List[Any]:
        # Build the list by prepending in descending order, which leaves the
        # chain sorted ascending from the head.
        __lowerCAmelCase = None
        for i in sorted(__UpperCamelCase , reverse=__UpperCamelCase ):
            __lowerCAmelCase = Node(__UpperCamelCase , self.head )
    def __iter__( self )-> Tuple:
        # Yield each stored value from head to tail.
        __lowerCAmelCase = self.head
        while node:
            yield node.data
            __lowerCAmelCase = node.next_node
    def __len__( self )-> int:
        return sum(1 for _ in self )
    def __str__( self )-> Optional[int]:
        return " -> ".join([str(__UpperCamelCase ) for node in self] )
def __lowerCAmelCase ( sll_one , sll_two ):
    """Merge two sorted linked lists into a single sorted linked list.

    The original signature repeated the parameter name `__snake_case`, which
    is a SyntaxError — restored to two distinct parameters.

    NOTE(review): `SortedLinkedList` is not bound at module scope in this
    obfuscated file (the class above is named `_UpperCamelCase`), so calling
    this raises NameError — confirm the intended binding.
    """
    # Concatenate both value sequences; the list constructor re-sorts them.
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `SortedLinkedList`, `merge_lists`, `SSL`,
    # `test_data_odd` and `test_data_even` are all undefined under the
    # obfuscated names used above — confirm the intended bindings.
    lowerCamelCase : Union[str, Any] = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 367 |
'''simple docstring'''
import os
def lowercase ( triangle_path=None ):
    """Solve Project Euler 18/67: maximum top-to-bottom path sum in a triangle.

    Args:
        triangle_path: optional path to the triangle file; defaults to
            ``triangle.txt`` next to this script (backward-compatible
            generalization — the original took no arguments but read from
            undefined names).

    Returns:
        The maximum sum obtainable moving from the apex to the base.
    """
    if triangle_path is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as input_file:
        triangle = input_file.readlines()
    # Parse each line into a row of ints.
    a = []
    for line in triangle:
        numbers_from_line = [int(number) for number in line.strip().split(" ")]
        a.append(numbers_from_line)
    # Dynamic programming: each cell accumulates the best path reaching it
    # from the row above (out-of-range neighbours contribute 0).
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    # The answer is the best accumulated value on the bottom row.
    return max(a[-1])
if __name__ == "__main__":
    # NOTE(review): `solution` is undefined — the solver above is named
    # `lowercase` in this obfuscated file; confirm the intended name.
    print(solution())
| 679 | 0 |
def lowerCAmelCase__ ( _a ):
    """Sort a list in place with odd-even (brick) transposition sort.

    Alternating passes compare even-indexed then odd-indexed adjacent pairs;
    after len(_a) passes the list is sorted ascending.

    Args:
        _a: mutable sequence of comparable items; sorted in place.

    Returns:
        The same (now sorted) sequence, for convenience.
    """
    arr_size = len(_a)
    for pass_num in range(arr_size):
        # Even passes start at index 0, odd passes at index 1.
        for i in range(pass_num % 2, arr_size - 1, 2):
            if _a[i + 1] < _a[i]:
                # Restore the swap the obfuscation destroyed (it assigned the
                # pair to a throwaway name instead of swapping in place).
                _a[i], _a[i + 1] = _a[i + 1], _a[i]
    return _a
if __name__ == "__main__":
    # NOTE(review): `arr` and `odd_even_transposition` are undefined — the
    # list binds to `lowercase` and the sorter above is `lowerCAmelCase__`
    # in this obfuscated file; confirm the intended names.
    lowercase : Tuple = list(range(10, 0, -1))
    print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 568 |
'''simple docstring'''
def lowercase ( __magic_name__ ):
    """Return the n-th Fibonacci number with fib(1)=0, fib(2)=1, fib(3)=2, ...

    Non-integer input (and n == 1) yields 0, matching the original
    Project Euler helper's defensive behaviour.

    Args:
        __magic_name__: 1-based index into the sequence.

    Returns:
        The Fibonacci number at that index.
    """
    # The obfuscated original tested `isinstance(n, n)` and never bound
    # `sequence` — both restored here.
    if __magic_name__ == 1 or not isinstance(__magic_name__, int):
        return 0
    elif __magic_name__ == 2:
        return 1
    else:
        sequence = [0, 1]
        # Build the sequence iteratively up to index n.
        for i in range(2, __magic_name__ + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[__magic_name__]
def lowercase ( __magic_name__ ):
    """Return the index of the first Fibonacci number with n digits.

    NOTE(review): `digits`, `index`, `n` and `fibonacci` are all undefined —
    the obfuscated `UpperCAmelCase : ... =` lines presumably bound
    digits/index, and the helper above is itself named `lowercase`, which this
    definition shadows; confirm against the upstream Euler 25 solution.
    """
    UpperCAmelCase : Optional[int] = 0
    UpperCAmelCase : Union[str, Any] = 2
    # Advance through the sequence until the digit count reaches n.
    while digits < n:
        index += 1
        UpperCAmelCase : Any = len(str(fibonacci(__magic_name__ ) ) )
    return index
def lowercase ( __magic_name__ = 1000 ):
    """Project Euler 25 entry point: index of the first n-digit Fibonacci number.

    NOTE(review): `fibonacci_digits_index` is undefined — the helper above is
    named `lowercase` and is shadowed by this definition; confirm.
    """
    return fibonacci_digits_index(__magic_name__ )
if __name__ == "__main__":
    # NOTE(review): `solution` is undefined — all three helpers above share
    # the obfuscated name `lowercase`; confirm the intended entry point.
    print(solution(int(str(input()).strip())))
| 679 | 0 |
'''simple docstring'''
from __future__ import annotations
# Candidate (row, col) moves for the grid search below; the index of the move
# used to reach a cell is recorded in the action grid for path reconstruction.
# NOTE(review): the search function references `DIRECTIONS`, but this constant
# is bound to the obfuscated name `lowerCAmelCase_` — confirm.
lowerCAmelCase_ = [
    [-1, 0],  # row - 1 (up in matrix terms)
    [0, -1],  # col - 1 (left)
    [1, 0],  # row + 1 (down)
    [0, 1],  # col + 1 (right)
]
def _A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,):
'''simple docstring'''
A__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCAmelCase ) )
] # the reference grid
A__ = 1
A__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCAmelCase ) )
] # the action grid
A__ = init[0]
A__ = init[1]
A__ = 0
A__ = g + heuristic[x][y] # cost from starting cell to destination cell
A__ = [[f, g, x, y]]
A__ = False # flag that is set when search is complete
A__ = False # flag set if we can't find expand
while not found and not resign:
if len(UpperCAmelCase ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
A__ = cell.pop()
A__ = next_cell[2]
A__ = next_cell[3]
A__ = next_cell[1]
if x == goal[0] and y == goal[1]:
A__ = True
else:
for i in range(len(UpperCAmelCase ) ): # to try out different valid actions
A__ = x + DIRECTIONS[i][0]
A__ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
A__ = g + cost
A__ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
A__ = 1
A__ = i
A__ = []
A__ = goal[0]
A__ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
A__ = x - DIRECTIONS[action[x][y]][0]
A__ = y - DIRECTIONS[action[x][y]][1]
A__ = xa
A__ = ya
invpath.append([x, y] )
A__ = []
for i in range(len(UpperCAmelCase ) ):
path.append(invpath[len(UpperCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
    # Demo run of the grid search on a small maze.
    # NOTE(review): every binding below uses the obfuscated name
    # `lowerCAmelCase_`, so `grid`, `init`, `goal`, `cost`, `heuristic`,
    # `search`, `action` and `path` are all undefined where read — the
    # assignments presumably bound those names originally; confirm.
    lowerCAmelCase_ = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    lowerCAmelCase_ = [0, 0]
    # all coordinates are given in format [y,x]
    lowerCAmelCase_ = [len(grid) - 1, len(grid[0]) - 1]
    lowerCAmelCase_ = 1
    # the cost map which pushes the path closer to the goal
    lowerCAmelCase_ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            # Manhattan distance to the goal as the base heuristic.
            lowerCAmelCase_ = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                lowerCAmelCase_ = 9_9
    lowerCAmelCase_ = search(grid, init, goal, cost, heuristic)
    print('''ACTION MAP''')
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Number of hidden layers for each published RWKV checkpoint size.
# NOTE(review): both dicts bind the same name `a` (with incorrect
# annotations), so the second clobbers the first; the conversion code below
# reads `NUM_HIDDEN_LAYERS_MAPPING`, and the second dict is presumably the
# matching hidden-size table — confirm upstream before renaming, as the
# rest of the file is outside this view.
a : List[str] = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}
# Hidden dimension for each published RWKV checkpoint size.
a : Dict = {
    "169M": 7_68,
    "430M": 10_24,
    "1B5": 20_48,
    "3B": 25_60,
    "7B": 40_96,
    "14B": 51_20,
}
def convert_state_dict(state_dict):
    """Rename the keys of an RWKV checkpoint state dict to the HF naming scheme.

    Mutates and returns `state_dict`. Fix: the function was defined under a
    mangled name while being called as `convert_state_dict`, and the per-key
    rename results were assigned to a mangled local instead of `name`.
    """
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        # Everything except the LM head lives under the `rwkv.` prefix.
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download an RWKV checkpoint from the Hub and convert it to HF format.

    Fix: the original signature repeated the same mangled parameter name
    (a SyntaxError) and the body read/assigned mangled locals; the function is
    renamed to match the call site in the `__main__` block.

    Args:
        repo_id: Hub repo containing the raw RWKV checkpoint.
        checkpoint_file: name of the checkpoint file inside that repo.
        output_dir: where the converted tokenizer/config/weights are saved.
        size: one of the keys of NUM_HIDDEN_LAYERS_MAPPING; inferred from the
            checkpoint file name when not provided.
        tokenizer_file: optional tokenizer.json; defaults to the GPT-NeoX tokenizer.
        push_to_hub: if True, push the converted model under `model_name`.
        model_name: Hub name (user/repo) used when pushing.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277  # GPT-NeoX-20B vocabulary size
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # Fix: the parser and parsed args were both assigned to the mangled name
    # `a`, while the code reads `parser` / `args` -> NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 679 | 0 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> int:
    """
    Convert molarity to normality: normality = molarity * n-factor.

    Fix: all four functions in this module shared one mangled name (shadowing
    each other) and repeated the same parameter name (a SyntaxError) while the
    body read `moles`, `volume` and `nfactor`.

    >>> molarity_to_normality(2, 3.1, 0.31)
    20
    >>> molarity_to_normality(4, 11.4, 5.7)
    8
    """
    return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> int:
    """
    Solve the ideal gas law PV = nRT for pressure (R = 0.0821 L*atm/(mol*K)).

    Volume in litres, temperature in kelvin.

    >>> moles_to_pressure(0.82, 3, 300)
    90
    >>> moles_to_pressure(8.2, 5, 200)
    10
    """
    return round(float((moles * 0.0821 * temperature) / (volume)))
def moles_to_volume(pressure: float, moles: float, temperature: float) -> int:
    """
    Solve the ideal gas law PV = nRT for volume (R = 0.0821 L*atm/(mol*K)).

    Pressure in atm, temperature in kelvin.

    >>> moles_to_volume(0.82, 3, 300)
    90
    >>> moles_to_volume(8.2, 5, 200)
    10
    """
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> int:
    """
    Solve the ideal gas law PV = nRT for temperature (R = 0.0821 L*atm/(mol*K)).

    Pressure in atm, volume in litres; result in kelvin.

    >>> pressure_and_volume_to_temperature(0.82, 1, 2)
    20
    >>> pressure_and_volume_to_temperature(8.2, 5, 3)
    60
    """
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
'''simple docstring'''
def lowercase(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string.

    Both inputs are rendered in binary, zero-padded to the same width, and
    combined digit by digit; the result is prefixed with "0b".

    Fix: the original signature repeated the same mangled parameter name
    (a SyntaxError) and assigned intermediates to a mangled local while
    reading `a_binary` / `b_binary`.

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """Feature for translations with a fixed set of languages per example.

    Fixes: both classes in this module were given the same mangled name (the
    second shadowed the first), the fields were all named `_a` while the
    methods read `self.languages`, and `field(init=..., repr=...)` referenced
    an undefined name instead of `False`.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # One pyarrow string column per language, in sorted order.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dict of string Values."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    """Feature for translations with a variable set of languages per example.

    Fixes: the class shared a mangled name with the previous dataclass, fields
    were all named `_a` while methods read `self.languages` /
    `self.num_languages`, and `field(init=..., repr=...)` referenced an
    undefined name instead of `False`.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # Normalize to a sorted, de-duplicated language list (or None).
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Validate and encode one example's {lang: text-or-list} mapping."""
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self):
        """Flatten into parallel sequences of language codes and translations."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 364 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework used by the tests below.
# Fix: every branch assigned to the mangled name `a`, while the test body
# reads `FRAMEWORK`.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
    """Test suite for PerceiverTokenizer (byte-level tokenizer).

    NOTE(review): this file has been mechanically mangled — every method is
    named `A_` (later defs shadow earlier ones), several signatures repeat the
    parameter name `snake_case` (a SyntaxError), locals are assigned to
    `UpperCAmelCase` while later lines read the original names, and the base
    `lowercase__` is undefined (presumably TokenizerTesterMixin — confirm
    against the upstream test file before running).
    """

    SCREAMING_SNAKE_CASE__ : int = PerceiverTokenizer
    # NOTE(review): duplicate class attribute name — this overwrites the line above.
    SCREAMING_SNAKE_CASE__ : List[str] = False

    def A_ ( self ):
        """Set up: save a freshly built tokenizer into the temp dir."""
        super().setUp()
        UpperCAmelCase : List[str] = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def A_ ( self ):
        """Return the pretrained deepmind/language-perceiver tokenizer."""
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )

    def A_ ( self , **snake_case ):
        """Reload the tokenizer saved in setUp, forwarding kwargs."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )

    def A_ ( self , snake_case , snake_case=False , snake_case=2_0 , snake_case=5 ):
        """Build a (text, ids) pair of decodable, round-trippable tokens."""
        # NOTE(review): duplicate parameter names above — mangled signature.
        UpperCAmelCase : Optional[Any] = []
        for i in range(len(snake_case ) ):
            try:
                UpperCAmelCase : int = tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        UpperCAmelCase : Optional[int] = list(filter(lambda snake_case : re.match(r"^[ a-zA-Z]+$" , t[1] ) , snake_case ) )
        UpperCAmelCase : Any = list(filter(lambda snake_case : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=snake_case ) , snake_case ) )
        if max_length is not None and len(snake_case ) > max_length:
            UpperCAmelCase : Optional[Any] = toks[:max_length]
        if min_length is not None and len(snake_case ) < min_length and len(snake_case ) > 0:
            while len(snake_case ) < min_length:
                UpperCAmelCase : Any = toks + toks
        # toks_str = [t[1] for t in toks]
        UpperCAmelCase : Dict = [t[0] for t in toks]
        # Ensure consistency
        UpperCAmelCase : Any = tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case )
        if " " not in output_txt and len(snake_case ) > 1:
            UpperCAmelCase : Dict = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case )
            )
        if with_prefix_space:
            UpperCAmelCase : Union[str, Any] = " " + output_txt
        UpperCAmelCase : Dict = tokenizer.encode(snake_case , add_special_tokens=snake_case )
        return output_txt, output_ids

    def A_ ( self ):
        """Encode/decode round trip including multibyte (non-ASCII) characters."""
        UpperCAmelCase : Union[str, Any] = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = "Unicode €."
        UpperCAmelCase : int = tokenizer(snake_case )
        UpperCAmelCase : Tuple = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Optional[Any] = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]Unicode €.[SEP]" )
        UpperCAmelCase : Tuple = tokenizer("e è é ê ë" )
        UpperCAmelCase : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
        self.assertEqual(encoded["input_ids"] , snake_case )
        # decoding
        UpperCAmelCase : Dict = tokenizer.decode(snake_case )
        self.assertEqual(snake_case , "[CLS]e è é ê ë[SEP]" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )

    def A_ ( self ):
        """Batched encoding: expected ids, tensor shapes, attention mask."""
        UpperCAmelCase : int = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        UpperCAmelCase : List[str] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
        # fmt: on
        UpperCAmelCase : Dict = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        self.assertIsInstance(snake_case , snake_case )
        if FRAMEWORK != "jax":
            UpperCAmelCase : List[Any] = list(batch.input_ids.numpy()[0] )
        else:
            UpperCAmelCase : str = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(snake_case , snake_case )
        self.assertEqual((2, 3_8) , batch.input_ids.shape )
        self.assertEqual((2, 3_8) , batch.attention_mask.shape )

    def A_ ( self ):
        """Encoder-only tokenizer must not emit decoder_* keys."""
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        UpperCAmelCase : List[Any] = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , snake_case )
        self.assertIn("attention_mask" , snake_case )
        self.assertNotIn("decoder_input_ids" , snake_case )
        self.assertNotIn("decoder_attention_mask" , snake_case )

    def A_ ( self ):
        """Targets padded/truncated to max_length=32."""
        UpperCAmelCase : Tuple = self.perceiver_tokenizer
        UpperCAmelCase : int = [
            "Summary of the text.",
            "Another summary.",
        ]
        UpperCAmelCase : List[Any] = tokenizer(
            text_target=snake_case , max_length=3_2 , padding="max_length" , truncation=snake_case , return_tensors=snake_case )
        self.assertEqual(3_2 , targets["input_ids"].shape[1] )

    def A_ ( self ):
        """save_pretrained / from_pretrained round trip, incl. added special tokens."""
        UpperCAmelCase : Any = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length , 4_2 )
        # Now let's start the test
        UpperCAmelCase : Tuple = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : Dict = tempfile.mkdtemp()
                UpperCAmelCase : Any = " He is very happy, UNwant\u00E9d,running"
                UpperCAmelCase : int = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )
                UpperCAmelCase : List[str] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                shutil.rmtree(snake_case )
        UpperCAmelCase : Dict = self.get_tokenizers(model_max_length=4_2 )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase : str = tempfile.mkdtemp()
                UpperCAmelCase : int = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"] )
                UpperCAmelCase : int = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token" )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                UpperCAmelCase : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
                tokenizer.save_pretrained(snake_case )
                UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(snake_case )
                UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
                self.assertListEqual(snake_case , snake_case )
                self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 4_2 )
                UpperCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(snake_case , model_max_length=4_3 )
                self.assertEqual(tokenizer.model_max_length , 4_3 )
                shutil.rmtree(snake_case )

    def A_ ( self ):
        """additional_special_tokens honored from saved config / overridable in from_pretrained."""
        UpperCAmelCase : Dict = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(snake_case )
                with open(os.path.join(snake_case , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Union[str, Any] = json.load(snake_case )
                with open(os.path.join(snake_case , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    UpperCAmelCase : Any = json.load(snake_case )
                UpperCAmelCase : str = [f"<extra_id_{i}>" for i in range(1_2_5 )]
                UpperCAmelCase : List[Any] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                UpperCAmelCase : List[str] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(snake_case , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )
                with open(os.path.join(snake_case , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(snake_case , snake_case )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                UpperCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(
                    snake_case , )
                self.assertIn(
                    "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                UpperCAmelCase : Optional[int] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case )]
                UpperCAmelCase : Optional[int] = tokenizer_class.from_pretrained(
                    snake_case , additional_special_tokens=snake_case , )
                self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )

    def A_ ( self ):
        """Decoding an out-of-range byte id yields the replacement character."""
        UpperCAmelCase : int = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_7_8] ) , "�" )

    def A_ ( self ):
        """Intentionally skipped (not applicable to a byte-level tokenizer)."""
        pass

    def A_ ( self ):
        """Intentionally skipped (not applicable to a byte-level tokenizer)."""
        pass

    def A_ ( self ):
        """Intentionally skipped (not applicable to a byte-level tokenizer)."""
        pass

    def A_ ( self ):
        """Intentionally skipped (not applicable to a byte-level tokenizer)."""
        pass

    def A_ ( self ):
        """convert_tokens_to_string works on a mixed special/char token list."""
        UpperCAmelCase : Dict = self.get_tokenizers(fast=snake_case , do_lower_case=snake_case )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                UpperCAmelCase : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                UpperCAmelCase : int = tokenizer.convert_tokens_to_string(snake_case )
                self.assertIsInstance(snake_case , snake_case )
| 679 | 0 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class __UpperCAmelCase :
    """Helper that builds small MPNet configs/inputs and checks model outputs.

    NOTE(review): mechanically mangled file — `__init__` and the
    `create_and_check_*` methods repeat the parameter name
    `SCREAMING_SNAKE_CASE` (a SyntaxError) and assign every local to
    `UpperCamelCase` while later lines read the original attribute names.
    Restore from the upstream MPNet test file before running.
    """

    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> Optional[int]:
        """Store the (tiny) model hyper-parameters used by the tests."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = seq_length
        UpperCamelCase = is_training
        UpperCamelCase = use_input_mask
        UpperCamelCase = use_token_type_ids
        UpperCamelCase = use_labels
        UpperCamelCase = vocab_size
        UpperCamelCase = hidden_size
        UpperCamelCase = num_hidden_layers
        UpperCamelCase = num_attention_heads
        UpperCamelCase = intermediate_size
        UpperCamelCase = hidden_act
        UpperCamelCase = hidden_dropout_prob
        UpperCamelCase = attention_probs_dropout_prob
        UpperCamelCase = max_position_embeddings
        UpperCamelCase = type_vocab_size
        UpperCamelCase = type_sequence_label_size
        UpperCamelCase = initializer_range
        UpperCamelCase = num_labels
        UpperCamelCase = num_choices
        UpperCamelCase = scope

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """Return the reference pretrained config."""
        return MPNetConfig.from_pretrained("microsoft/mpnet-base" )

    def __lowerCAmelCase ( self ) -> List[str]:
        """Build random input ids, masks and labels plus a config."""
        UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase = None
        if self.use_input_mask:
            UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase = None
        UpperCamelCase = None
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __lowerCAmelCase ( self ) -> Dict:
        """Build a small MPNetConfig from the stored hyper-parameters."""
        return MPNetConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
        """Check base model output shapes."""
        UpperCamelCase = MPNetModel(config=SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCamelCase = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        UpperCamelCase = model(SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
        """Check question-answering head output shapes."""
        UpperCamelCase = MPNetForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCamelCase = model(
            SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
        """Check sequence-classification head output shapes."""
        UpperCamelCase = self.num_labels
        UpperCamelCase = MPNetForSequenceClassification(SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCamelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """Check multiple-choice head output shapes."""
        UpperCamelCase = self.num_choices
        UpperCamelCase = MPNetForMultipleChoice(config=SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCamelCase = model(
            SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """Check token-classification head output shapes."""
        UpperCamelCase = self.num_labels
        UpperCamelCase = MPNetForTokenClassification(config=SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCamelCase = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """Prepare the common (config, inputs_dict) pair for the mixin tests."""
        UpperCamelCase = self.prepare_config_and_inputs()
        # NOTE(review): broken unpacking — original likely destructured the
        # full 6-tuple here; confirm against the upstream test file.
        (UpperCamelCase) = config_and_inputs
        UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
    """Common-mixin test suite for the MPNet model family.

    NOTE(review): mangled file — the bases `lowercase__` are undefined
    (presumably ModelTesterMixin and PipelineTesterMixin — confirm upstream),
    the four class attributes below all share the name `lowercase` (each
    overwrites the previous), `MPNetModelTester` is not defined under that
    name in this file, and `SCREAMING_SNAKE_CASE` is read without being bound.
    """

    lowercase = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    lowercase = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowercase = False
    lowercase = True

    def __lowerCAmelCase ( self ) -> List[str]:
        """Set up the model tester and config tester."""
        UpperCamelCase = MPNetModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )

    def __lowerCAmelCase ( self ) -> Any:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    def __lowerCAmelCase ( self ) -> str:
        """Exercise the base model check."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> List[Any]:
        """Exercise the sequence-classification check."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Dict:
        """Exercise the multiple-choice check."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> List[str]:
        """Exercise the token-classification check."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Tuple:
        """Exercise the question-answering check."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*SCREAMING_SNAKE_CASE )
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test comparing real microsoft/mpnet-base outputs.

    NOTE(review): mangled locals — values are assigned to `UpperCamelCase`
    while the assertions read `model`, `output` and `SCREAMING_SNAKE_CASE`.
    """

    @slow
    def __lowerCAmelCase ( self ) -> int:
        """Forward a fixed input and compare shape plus a 3x3 logits slice."""
        UpperCamelCase = MPNetModel.from_pretrained("microsoft/mpnet-base" )
        UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        UpperCamelCase = model(SCREAMING_SNAKE_CASE )[0]
        UpperCamelCase = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
        UpperCamelCase = torch.tensor(
            [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 606 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: both module constants were assigned to the same mangled name `a`,
# so the archive map clobbered the logger.
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class UpperCamelCase__ ( PretrainedConfig ):
    """Configuration for the EfficientFormer model.

    Fixes: the base class referenced the undefined name `lowercase__` (the
    file imports `PretrainedConfig` for this purpose), `__init__` repeated the
    parameter name `snake_case` (a SyntaxError), and every attribute was
    assigned to a mangled local instead of `self.*`. Parameter names/order are
    restored from the upstream EfficientFormer config, whose defaults match
    the defaults preserved here — confirm against upstream before relying on
    keyword argument names.
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 679 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Fix: both module constants were assigned to the same mangled name
# `_lowerCAmelCase`, so the archive map clobbered the logger.
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}
class __UpperCamelCase(PretrainedConfig):
    """RetriBERT-style configuration.

    Fixes over the mangled original: the base class is the
    ``PretrainedConfig`` imported at the top of this file (the previous base
    ``lowercase__`` was an undefined name), the ``model_type`` class
    attribute carries its conventional name, every constructor parameter has
    a distinct name (the original duplicated ``_A`` — a SyntaxError), and
    each value is stored on ``self`` instead of a discarded local.
    """

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        """Create the config; ``pad_token_id`` and extra kwargs go to the base class."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Whether the query and document towers share encoder weights.
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 259 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds tiny TFResNet configs/inputs and runs shape assertions.

    Renamed from the mangled ``UpperCamelCase__``: the test class below
    instantiates ``TFResNetModelTester(self)``, so this is the intended name.
    All locals that the original collapsed into one throwaway variable are
    restored so that ``self.*`` reads and ``return`` statements resolve.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=None,
        depths=None,
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = [10, 20, 30, 40] if hidden_sizes is None else hidden_sizes
        self.depths = [1, 1, 2, 1] if depths is None else depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in hidden_sizes — assumption from the upstream
        # ResNet tester; the mangled original only showed `len(<arg>)`.
        self.num_stages = len(self.hidden_sizes)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for TFResNet.

    Renamed from the mangled ``UpperCamelCase__`` (which collided with the
    integration-test class below and was silently shadowed). Mixin bases are
    the ``TFModelTesterMixin``/``PipelineTesterMixin`` imported at the top of
    this snippet; undefined locals are restored.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Intentionally a no-op: ResNet's config has no text-model properties.
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test.

    Renamed from the mangled ``lowercase``: the integration test below calls
    ``prepare_img()``. The original also returned an undefined name; the
    opened image is now bound before being returned.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the pretrained TFResNet checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Only available when vision extras are installed.
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 679 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def a_(method):
    """Decorator: run the model's Accelerate pre-forward hook before `method`.

    Returns `method` unchanged when accelerate is unavailable or older than
    0.17.0 (hook support landed there). The mangled original compared
    ``version.parse(<the decorated callable>)`` against "0.17.0" instead of
    the accelerate version string — fixed to compare the version.
    The parameter was renamed from the mangled ``__lowerCAmelCase``;
    decorators are applied positionally, so callers are unaffected.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        # Fire the pre-forward hook (e.g. to move weights onto device) first.
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
| 615 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    """Builds tiny MPNet configs/inputs and runs shape assertions.

    Renamed from the mangled ``UpperCamelCase__``: the test class below
    instantiates ``MPNetModelTester(self)``. Duplicate parameter names and
    discarded locals from the mangled original are restored.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence/token/choice labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate every example once per choice along a new axis.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for MPNet (renamed from the mangled, colliding
    ``UpperCamelCase__``; mixin bases restored from this snippet's imports)."""

    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the pretrained microsoft/mpnet-base weights."""

    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 679 | 0 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def UpperCamelCase_( _snake_case : str , _snake_case : str , _snake_case : str ):
    """Convert a T5X (t5 / longt5) checkpoint into a Flax `transformers` model and save it.

    Positional arguments (all paths/names, presumably strings — TODO confirm):
    t5x_checkpoint_path, config_name, flax_dump_folder_path.

    NOTE(review): every assignment target in this function was mangled to
    `__a`, so intermediate values (e.g. `tax_attention_key`) and the Flax
    parameter-dict destinations are lost; later reads of those names are
    unresolved, and the triplicated `_snake_case` parameter is itself a
    SyntaxError. The code is left byte-identical — reconstructing the exact
    Flax key paths is not possible from this file alone. Comments describe
    the intended data flow.
    """
    __a =AutoConfig.from_pretrained(_snake_case )
    __a =FlaxAutoModelForSeqaSeqLM.from_config(config=_snake_case )
    __a =checkpoints.load_tax_checkpoint(_snake_case )
    # v1.1-style checkpoints split the MLP input projection into wi_0/wi_1.
    __a ="wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
    # Pick the attention module name matching the architecture variant.
    if config.model_type == "t5":
        __a ="SelfAttention"
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        __a ="LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        __a ="TransientGlobalSelfAttention"
    else:
        raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'
            ' attribute with a value from [\'local\', \'transient-global].' )
    # Encoder
    for layer_index in range(config.num_layers ):
        __a =F'layers_{str(_snake_case )}'
        # Self-Attention: pull k/o/q/v kernels from the T5X layer.
        __a =tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        __a =tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        __a =tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        __a =tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            __a =tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
        # Layer Normalization
        __a =tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
        if split_mlp_wi:
            __a =tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            __a =tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            __a =tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
        __a =tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        __a =tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning: copy the extracted tensors into the Flax param tree.
        __a =flax_model.params["encoder"]["block"][str(_snake_case )]["layer"]
        __a =tax_attention_key
        __a =tax_attention_out
        __a =tax_attention_query
        __a =tax_attention_value
        __a =tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            __a =tax_global_layer_norm
        if split_mlp_wi:
            __a =tax_mlp_wi_a
            __a =tax_mlp_wi_a
        else:
            __a =tax_mlp_wi
        __a =tax_mlp_wo
        __a =tax_mlp_layer_norm
        __a =flax_model_encoder_layer_block
    # Only for layer 0: relative position bias is shared across layers.
    __a =tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    __a =tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        __a =tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        __a =tax_encoder_global_rel_embedding
    # Assigning
    __a =tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    __a =tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers ):
        __a =F'layers_{str(_snake_case )}'
        # Self-Attention
        __a =tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        __a =tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        __a =tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        __a =tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
        # Layer Normalization
        __a =tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]
        # Encoder-Decoder-Attention
        __a =tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        __a =tax_enc_dec_attention_module["key"]["kernel"]
        __a =tax_enc_dec_attention_module["out"]["kernel"]
        __a =tax_enc_dec_attention_module["query"]["kernel"]
        __a =tax_enc_dec_attention_module["value"]["kernel"]
        # Layer Normalization
        __a =tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
        # MLP
        if split_mlp_wi:
            __a =tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            __a =tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            __a =tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
        __a =tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        __a =tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        __a =flax_model.params["decoder"]["block"][str(_snake_case )]["layer"]
        __a =tax_attention_key
        __a =tax_attention_out
        __a =tax_attention_query
        __a =tax_attention_value
        __a =tax_pre_attention_layer_norm
        __a =tax_enc_dec_attention_key
        __a =tax_enc_dec_attention_out
        __a =tax_enc_dec_attention_query
        __a =tax_enc_dec_attention_value
        __a =tax_cross_layer_norm
        if split_mlp_wi:
            __a =tax_mlp_wi_a
            __a =tax_mlp_wi_a
        else:
            __a =tax_mlp_wi
        __a =tax_mlp_wo
        __a =txa_mlp_layer_norm
        __a =flax_model_decoder_layer_block
    # Decoder Normalization
    __a =tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    __a =txa_decoder_norm
    # Only for layer 0:
    __a =tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    __a =tax_decoder_rel_embedding
    # Token Embeddings
    __a =tax_model["target"]["token_embedder"]["embedding"]
    __a =txa_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        __a =tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(_snake_case )
    print('T5X Model was sucessfully converted!' )
if __name__ == "__main__":
    # CLI entry point for the T5X -> Flax conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    # `UpperCamelCase_` is the converter defined above; the mangled original
    # called a nonexistent `convert_tax_checkpoint_to_flax` and read the
    # nonexistent attribute `args.tax_checkpoint_path`.
    UpperCamelCase_(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 242 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
# The converter below reads `logger` and `TOKENIZER_CLASSES`; the mangled
# original bound both values to a throwaway name `a`.
logger = logging.get_logger(__name__)

# Map each convertible slow-tokenizer name to its fast counterpart class.
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Download slow tokenizer checkpoints and save them as fast `tokenizer.json` files.

    Renamed from the mangled ``lowercase``: the `__main__` block below calls
    ``convert_slow_checkpoint_to_fast``. All locals the original collapsed
    into one throwaway name are restored so later reads resolve.

    Args:
        tokenizer_name: a key of ``TOKENIZER_CLASSES``, or None for all of them.
        checkpoint_name: a single checkpoint id, or None for every canonical one.
        dump_path: output directory for the generated fast tokenizer files.
        force_download: re-download checkpoints even when cached.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
        for checkpoint in checkpoint_names:
            logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    # Vocab lives in a sub-folder: push the prefix into the path.
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(F"=> File names {file_names}" )
            # Keep only the fast `tokenizer.json` artifacts.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name)
                    logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
    # CLI entry point: the mangled original bound the parser and the parsed
    # args to a throwaway name `a`, leaving `parser`/`args` undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()
    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 679 | 0 |
def floyd(n: int) -> None:
    """Print the upper half of the star triangle: row i has i+1 stars,
    right-aligned with leading spaces.

    Renamed from the mangled ``__a`` (three functions shared that name and
    shadowed each other); ``pretty_print`` below calls ``floyd``.
    """
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()
def reverse_floyd(n: int) -> None:
    """Print the lower half of the star triangle: rows shrink from n stars
    down to one, each followed by the matching run of spaces.

    Renamed from the mangled ``__a``; ``pretty_print`` below calls
    ``reverse_floyd``.
    """
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")
def pretty_print(n: int) -> None:
    """Print the full star diamond (upper then lower half) for size ``n``.

    Renamed from the mangled ``__a``; the interactive loop below calls
    ``pretty_print``. Non-positive sizes print a notice and return.
    """
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
    print(r'| /\ | |- | |- |--| |\ /| |-')
    print(r'|/ \| |- |_ |_ |__| | \/ | |_')
    # Loop flag: the mangled original assigned `__A = 1` but tested the
    # undefined name `K`; the same flag is now used consistently.
    K = 1
    while K:
        user_number = int(input('enter the number and , and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))
    print('Good Bye...')
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCamelCase__(PipelineTool):
    """Tool that answers an English question about an image via a ViLT VQA checkpoint.

    Fixes over the mangled original: the base class is the ``PipelineTool``
    imported above (``lowercase__`` was undefined), the tool-protocol class
    attributes carry their conventional names, duplicate method parameter
    names (a SyntaxError) are resolved, and ``decode`` uses the bound
    ``idx``/``id2label`` names (``idalabel`` did not exist).
    """

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Fail fast when vision extras (PIL, etc.) are missing.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        """Tokenize/encode the (image, question) pair into model inputs."""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        """Run the VQA model without tracking gradients; return raw logits."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring logit to its human-readable answer label."""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 679 | 0 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    """Builds tiny OpenLlama configs and random inputs for the shape/consistency checks below.

    Fixes over the previous revision: ``__init__`` and the ``create_and_check_*`` methods
    declared the same parameter name repeatedly (a SyntaxError), every method was named
    ``snake_case`` (so later defs shadowed earlier ones), and
    ``prepare_config_and_inputs_for_common`` bound the whole tuple to one name and then
    used undefined ``input_ids``/``input_mask``.  The class is renamed to match the
    ``OpenLlamaModelTester(self)`` reference in the test case's ``setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a config plus random ids/masks/labels sized by the tester attributes."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Tiny OpenLlama config matching the tester's dimensions."""
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward pass with and without an attention mask; check the hidden-state shape."""
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Same as above but with cross-attention enabled and encoder states supplied."""
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """LM head forward with labels; check the logits shape."""
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Check that cached (past_key_values) decoding matches the full forward pass."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        """Unpack prepare_config_and_inputs() into the (config, inputs_dict) pair the mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __lowercase(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """OpenLlama model test case.

    Fixes over the previous revision: every test method was named ``snake_case`` (so only
    the final def survived and no test actually ran), the class attributes all collided on
    one name, the mixin bases were an undefined placeholder although the mixins are
    imported at the top of the file, and the rope-scaling test ran the models twice on the
    same (undefined-placeholder) input instead of a short and a long input.
    """

    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 542 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
# Module-level logger for this PyTorch -> Flax conversion utility module.
a : Optional[int] = logging.get_logger(__name__)
def rename_key(key):
    """Rewrite a PyTorch parameter name for Flax.

    Every ``<name>.<digits>`` segment (an indexed sub-module such as ``layers.0``)
    becomes ``<name>_<digits>``; all other dots are preserved.

    Fixes over the previous revision: ``re.findall`` was called with the key as both
    pattern and subject, so the compiled pattern was ignored and *every* dot got
    replaced.  The function is also renamed to match the ``rename_key(...)`` call
    in the converter below.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch weight key to its Flax equivalent and reshape the tensor if needed.

    Handles, in order: norm bias stored as Flax ``scale``; ``weight``/``gamma`` mapped to
    ``scale``; embedding weights; 4-D conv kernels (transposed to HWIO); 2-D linear
    kernels (transposed); and legacy ``gamma``/``beta`` layer-norm names.

    Fixes over the previous revision: the three parameters all shared one placeholder
    name, which is a SyntaxError; the function is renamed to match the
    ``rename_key_and_reshape_tensor(...)`` call in the converter below.

    Returns:
        tuple: ``(flax_tuple_key, tensor)`` — the (possibly) renamed key and the
        (possibly) transposed tensor.
    """
    # conv/layer norm whose bias actually lives under Flax's "scale" entry
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch OIHW -> Flax HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose the weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def lowercase(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a nested Flax parameter dict.

    Fixes over the previous revision: the three parameters shared one placeholder name
    (a SyntaxError), and the converted tensors were bound to a throwaway local instead
    of being stored under ``flax_state_dict[flax_key]``, so the returned dict was empty.

    Args:
        pt_state_dict: mapping of PyTorch parameter names to tensors (must support ``.numpy()``).
        flax_model: Flax model providing ``init_weights`` for the reference parameter shapes.
        init_key: integer seed for the Flax PRNG used to initialise the reference params.
    """
    # Step 1: Convert the PyTorch tensors to numpy arrays
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
| 679 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowerCAmelCase(__snake_case):
    """Return pi as a string with `__snake_case` significant digits (Chudnovsky algorithm).

    Fixes over the previous revision: ``isinstance`` was called with the argument as
    both operands (a TypeError at runtime), the Decimal context precision was never
    set, the multinomial term used ``factorial(precision)`` instead of ``factorial(k)``,
    and the loop bound was the precision rather than ``ceil(precision / 14)``
    (each Chudnovsky term contributes roughly 14 digits).

    Raises:
        TypeError: if the precision is not an integer.
        ValueError: if the precision is smaller than 1.
    """
    if not isinstance(__snake_case, int):
        raise TypeError("Undefined for non-integers")
    elif __snake_case < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = __snake_case
    num_iterations = ceil(__snake_case / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # Drop the final digit, which may be affected by rounding at the working precision.
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
lowerCamelCase : Any = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
| 367 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SchedulerCommonTest ):
    """EulerDiscreteScheduler checks layered on the common scheduler test-suite.

    Fixes over the previous revision: every method was named ``A_`` (so only the last
    def survived and no test ran), the two class attributes collided on one name, and
    the base class was an undefined placeholder although ``SchedulerCommonTest`` is
    imported at the top of the file.
    """

    # Attributes read by SchedulerCommonTest and the loop tests below.
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Base scheduler kwargs, overridable per-test via **kwargs."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Deterministic full denoising loop; pin the output statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Same loop under the v-prediction parameterization."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        """Full loop with timesteps placed directly on the target device."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop on device using the Karras sigma schedule."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 679 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
lowercase : Optional[Any] = logging.get_logger(__name__)

# Map from canonical RoBERTa checkpoint names to the URLs of their hosted config files.
lowercase : Any = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class UpperCAmelCase_ ( PretrainedConfig ):
    """RoBERTa model configuration.

    Fixes over the previous revision: every ``__init__`` parameter shared one
    placeholder name (a SyntaxError), so none of the values the body assigns
    (``vocab_size`` … ``classifier_dropout``) were actually defined, and the base
    class was an undefined placeholder although ``PretrainedConfig`` is imported
    at the top of the file.
    """

    # Identifier used by the HF auto classes to resolve this config type.
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoBERTa.

    Renamed from the previous revision, which reused the same placeholder class name as
    the config class above it (shadowing it), named the property with a placeholder
    instead of the ``inputs`` hook the OnnxConfig API reads, and inherited from an
    undefined placeholder although ``OnnxConfig`` is imported at the top of the file.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the model inputs for the ONNX exporter."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 568 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCamelCase__ ( TestCase ):
    """Static hygiene checks over the dataset scripts in ./datasets.

    Fixes over the previous revision: all four methods were named ``A_`` (so only the
    last def survived), the test methods called helper names (``_no_encoding_on_file_open``,
    ``_no_print_statements``) that therefore did not exist, and the base class was an
    undefined placeholder although ``TestCase`` is imported at the top of the file.
    """

    def _no_encoding_on_file_open(self, filepath: str):
        """Return a regex match if the file contains an `open(...)` call without an
        explicit mode/encoding keyword on the same line, else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Return the first real `print(` call in the file (commented-out or quoted
        occurrences are ignored via non-capturing alternatives), else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 679 | 0 |
'''simple docstring'''
def _A(hubble_constant, radiation_density, matter_density, dark_energy, redshift):
    """Return the Hubble parameter H(z) for a Lambda-CDM universe.

    Fixes over the previous revision: all five parameters shared one placeholder
    name, which is a SyntaxError; the names are restored from the keyword call in
    the demo block below.

    Args:
        hubble_constant: present-day Hubble constant H0 (same units as the result).
        radiation_density: relative radiation density (0..1).
        matter_density: relative matter density (0..1).
        dark_energy: relative dark-energy density (0..1).
        redshift: redshift z at which to evaluate H.

    Raises:
        ValueError: if any parameter is negative, or a relative density exceeds one.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('All input parameters must be positive')

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('Relative densities cannot be greater than one')

    # Curvature density follows from the closure relation sum(Omega_i) = 1.
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_squared = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    return hubble_constant * e_squared ** (1 / 2)
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
lowerCAmelCase_ = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 531 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
# Module-level logger for the PABEE modeling code below.
a : str = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    """BertEncoder that can run one transformer layer at a time for PABEE early exit.

    Renamed from the previous revision so that the ``BertEncoderWithPabee(config)``
    and ``self.encoder.adaptive_forward(...)`` references in the model below resolve.
    """

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """Run only layer `current_layer` and return its hidden states."""
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    """BERT model with Patience-based Early Exit (PABEE) inference.

    Fixes over the previous revision: the five methods were all named ``A_`` (so only
    the last def survived), the setters/stats methods assigned to throwaway locals
    instead of instance attributes, and ``forward`` compared against undefined names
    (``labels``/``patient_labels``) instead of the detached logits it had just computed.
    The class is renamed so the ``BertModelWithPabee(config)`` reference in the
    classification head below resolves.
    """

    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()

        # Early-exit bookkeeping: patience threshold plus running inference statistics.
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        """Set the output-delta threshold used for early exit on regression tasks."""
        self.regression_threshold = threshold

    def set_patience(self, patience):
        """Set how many consecutive agreeing layers trigger an early exit (0 = disabled)."""
        self.patience = patience

    def reset_stats(self):
        """Reset the running inference statistics."""
        self.inference_layers_num = 0
        self.inference_instances_num = 0

    def log_stats(self):
        """Print the average number of layers executed per instance and the speed-up."""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        """Run BERT layer by layer; exit early once `patience` layers agree.

        Returns a list of per-layer logits during training, or a single-element list
        with the exit layer's logits at inference time.
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            # Training: collect logits from every layer's classifier head.
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            # Patience-based early exit: stop once `patience` consecutive layers agree.
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    # Regression: layers "agree" when outputs differ by less than the threshold.
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    # Classification: layers "agree" when argmax predictions match.
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
    """BERT with PABEE (Patience-based Early Exit) and one classification head per layer.

    Every hidden layer gets its own linear classifier so the backbone can emit a
    prediction after each layer and stop early at inference time.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One classifier per hidden layer so each internal layer can produce logits.
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )
        self.init_weights()

    # NOTE(review): the decorator argument looks mangled — presumably the BERT
    # inputs docstring constant; confirm against the original source.
    @add_start_docstrings_to_model_forward(snake_case)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Run the backbone; when `labels` is given, prepend the weighted per-layer loss.

        Returns a tuple whose first element is either the loss (training) or the
        last layer's logits (inference).
        """
        # `logits` is a list with one logits tensor per exited layer.
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )
        outputs = (logits[-1],)
        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    # Deeper layers get a larger weight (ix + 1) in the total loss.
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
| 679 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _SCREAMING_SNAKE_CASE(BaseImageProcessor):
    """CLIP-style image processor.

    Optionally converts to RGB, resizes to a shortest-edge target, center-crops,
    rescales to [0, 1] and normalizes with per-channel statistics.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        # Fall back to the OpenAI CLIP normalization statistics.
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize so the shortest edge matches `size["shortest_edge"]`, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # Calls the module-level `resize` transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured transformations to one image or a batch.

        Per-call arguments override the instance defaults; returns a `BatchFeature`
        holding `pixel_values`.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Gaussian Error Linear Unit (exact erf formulation)."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """tanh approximation of GELU (GPT-2 style)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    """Fast tanh-based GELU approximation."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split `x` in two along `axis` and gate one half."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        # Keras gained a built-in approximate GELU in TF 2.4.
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    """Look up a TF activation function by name; raise KeyError if unknown."""
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}")
| 679 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_lowercase : int =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class UpperCamelCase_(Pipeline):
    """Visual Question Answering pipeline.

    Takes an image plus a question and returns the top answers with scores.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        """Split user kwargs into preprocess / forward / postprocess parameter dicts."""
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        """Accept either (image, question) or a dict / list of dicts with those keys."""
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # The caller passed a pre-assembled dict (or batch of dicts).
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        """Tokenize the question and extract image features for one example."""
        image = load_image(inputs['image'])
        model_inputs = self.tokenizer(
            inputs['question'], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Return the `top_k` answers as `{"score", "answer"}` dicts."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(F"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(scores, ids)]
| 364 |
'''simple docstring'''
from __future__ import annotations
class UpperCamelCase__ :
    """N-order IIR filter in direct form I.

    y[n] = (b0*x[n] + b1*x[n-1] + ... - a1*y[n-1] - ...) / a0
    """

    def __init__(self, order):
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs, b_coeffs):
        """Set filter coefficients; a leading a0=1.0 is assumed if omitted.

        Raises ValueError when either list does not have order+1 elements.
        """
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)
        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample):
        """Feed one input sample through the filter and return the output sample."""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift the delay lines and record the newest input/output.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
| 679 | 0 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__a : List[str] = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """Cosine-similarity matrix between two batches of embeddings.

    Rows are L2-normalized, so the result is the pairwise cosine similarity
    with shape (len(image_embeds), len(text_embeds)).
    """
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class __UpperCAmelCase(PreTrainedModel):
    """CLIP-based safety checker that scores images against NSFW concept embeddings."""

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        # Fixed (non-trainable) concept embeddings and their thresholds;
        # real values are loaded from the checkpoint.
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        """Score `images` (via their CLIP `clip_input`) and flag NSFW content.

        Returns (images, has_nsfw_concepts) where the flag list has one bool per image.
        """
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    # A triggered "special care" concept lowers every threshold slightly.
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input, images):
        """Tensor-only variant of `forward` suitable for ONNX export."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
| 606 |
'''simple docstring'''
import argparse
from collections import defaultdict
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = F"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Tuple = f.readlines()
UpperCAmelCase : Tuple = F"class {class_name}("
UpperCAmelCase : str = F"{4 * ' '}def {test_name}("
UpperCAmelCase : Dict = F"{8 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Tuple = F"{16 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : List[str] = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Tuple = 0
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = []
for line in lines:
if line.startswith(__magic_name__ ):
UpperCAmelCase : int = True
elif in_class and line.startswith(__magic_name__ ):
UpperCAmelCase : Dict = True
elif in_class and in_func and (line.startswith(__magic_name__ ) or line.startswith(__magic_name__ )):
UpperCAmelCase : List[str] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"{spaces * ' '}{correct_line}" )
UpperCAmelCase : List[str] = False
else:
new_lines.append(__magic_name__ )
with open(__magic_name__ , "w" ) as f:
for line in new_lines:
f.write(__magic_name__ )
def main(correct_filename, fail=None):
    """Apply every correction listed in `correct_filename`.

    Each line of `correct_filename` is `file;class;test;correct_line`. When
    `fail` is given, only tests listed (as `file::class::test`) in that file
    are patched.
    """
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    # CLI entry point: patch expected test values from a corrections file.
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
| 679 | 0 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints to the fast (tokenizers-backed) format.

    Downloads each requested checkpoint, saves it in the fast format under
    `dump_path`, and removes every generated file except `tokenizer.json`.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}
    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""")
        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    # The mapped vocab file lives in a sub-directory named after the checkpoint.
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"""=> File names {file_names}""")
            # Keep only the fast tokenizer.json artifacts.
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(f"""=> removing {file_name}""")
if __name__ == "__main__":
    # CLI entry point for the slow-to-fast tokenizer conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
    )
    parser.add_argument(
        """--tokenizer_name""",
        default=None,
        type=str,
        help=(
            F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
            """download and convert all the checkpoints from AWS."""
        ),
    )
    parser.add_argument(
        """--checkpoint_name""",
        default=None,
        type=str,
        help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
    )
    parser.add_argument(
        """--force_download""",
        action="""store_true""",
        help="""Re-download checkpoints.""",
    )
    args = parser.parse_args()
    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 259 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase__ :
    """Binary-tree node holding a number of coins."""

    data: int
    left: "UpperCamelCase__ | None" = None
    right: "UpperCamelCase__ | None" = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def lowercase(root):
    """Minimum number of moves to leave exactly one coin on every node.

    A move transfers one coin along one edge ("Distribute Coins in Binary
    Tree"). Raises ValueError when the coin count differs from the node count.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node) -> CoinsDistribResult:
        # (moves within this subtree, coins this subtree keeps for itself + 1)
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 679 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ (DiffusionPipeline):
    """Unconditional image-generation pipeline using DDIM sampling."""

    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        """Sample `batch_size` images by running the DDIM denoising loop."""
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL output.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 615 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a : List[Any] = logging.get_logger(__name__)
a : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a : int = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a : Any = {
"allenai/led-base-16384": 1_63_84,
}
class UpperCamelCase__ ( lowercase__ ):
    """Fast (``tokenizers``-backed) byte-level BPE tokenizer for LED.

    It mirrors BART's fast tokenizer and adds LED-specific handling of the
    optional ``global_attention_mask`` model input during padding (see the
    last method in this class).

    NOTE(review): this block appears machine-renamed and is not runnable
    as-is:
      * every class attribute below rebinds the single name
        ``SCREAMING_SNAKE_CASE__``, so only the last assignment survives;
      * every ``__init__`` parameter is named ``snake_case`` while the body
        reads ``add_prefix_space`` / ``trim_offsets``, which are therefore
        undefined at runtime;
      * locals are written to ``UpperCAmelCase`` but read back under their
        original names (``pre_tok_state``, ``state``, ``changes_to_apply``,
        ``token_ids_a``, ``difference``, ...);
      * all public methods share the name ``A_``, so each definition shadows
        the previous one, and ``@mask_token.setter`` refers to a property
        name that is never bound in this class body.
    The docstrings below therefore describe the *intended* behaviour of each
    member (matching the upstream BART/LED fast tokenizers); confirm against
    the un-obfuscated source before relying on them.
    """
    # Intended names: vocab_files_names, pretrained_vocab_files_map,
    # max_model_input_sizes, slow_tokenizer_class, model_input_names.
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ : Tuple = LEDTokenizer
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["input_ids", "attention_mask"]
    def __init__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="replace" , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=False , snake_case=True , **snake_case , ):
        '''Build the backend tokenizer, then force its pre-tokenizer and
        post-processor to agree with the requested ``add_prefix_space`` and
        ``trim_offsets`` settings, because a serialized ``tokenizer.json``
        may have been saved with different values.'''
        super().__init__(
            snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , )
        # Re-instantiate the pre-tokenizer if its serialized add_prefix_space
        # flag disagrees with the constructor argument.
        UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space:
            UpperCAmelCase : Tuple = getattr(snake_case , pre_tok_state.pop("type" ) )
            UpperCAmelCase : Any = add_prefix_space
            UpperCAmelCase : str = pre_tok_class(**snake_case )
        UpperCAmelCase : int = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        UpperCAmelCase : Dict = "post_processor"
        UpperCAmelCase : Dict = getattr(self.backend_tokenizer , snake_case , snake_case )
        if tokenizer_component_instance:
            UpperCAmelCase : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                UpperCAmelCase : int = tuple(state["sep"] )
            if "cls" in state:
                UpperCAmelCase : Union[str, Any] = tuple(state["cls"] )
            UpperCAmelCase : Tuple = False
            if state.get("add_prefix_space" , snake_case ) != add_prefix_space:
                UpperCAmelCase : Optional[Any] = add_prefix_space
                UpperCAmelCase : Optional[int] = True
            if state.get("trim_offsets" , snake_case ) != trim_offsets:
                UpperCAmelCase : Tuple = trim_offsets
                UpperCAmelCase : List[str] = True
            if changes_to_apply:
                # Rebuild the post-processor with the corrected settings and
                # install it back onto the backend tokenizer.
                UpperCAmelCase : Optional[Any] = getattr(snake_case , state.pop("type" ) )
                UpperCAmelCase : Tuple = component_class(**snake_case )
                setattr(self.backend_tokenizer , snake_case , snake_case )
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def A_ ( self ):
        '''Getter (intended name ``mask_token``): return the mask token as a
        string, or ``None`` — logging an error when verbose — if it was
        never set.'''
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def A_ ( self , snake_case ):
        '''Setter for ``mask_token``: a plain string value is wrapped in an
        ``AddedToken`` so stripping behaviour can be configured (upstream:
        the mask token absorbs the space before it).'''
        UpperCAmelCase : Tuple = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value
        UpperCAmelCase : Optional[Any] = value
    def A_ ( self , *snake_case , **snake_case ):
        '''Intended name ``_batch_encode_plus``: reject pre-tokenized input
        unless the tokenizer was built with ``add_prefix_space=True``, then
        delegate to the parent implementation.'''
        UpperCAmelCase : List[str] = kwargs.get("is_split_into_words" , snake_case )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*snake_case , **snake_case )
    def A_ ( self , *snake_case , **snake_case ):
        '''Intended name ``_encode_plus``: single-example counterpart of the
        method above, with the same pre-tokenized-input guard.'''
        UpperCAmelCase : Union[str, Any] = kwargs.get("is_split_into_words" , snake_case )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*snake_case , **snake_case )
    def A_ ( self , snake_case , snake_case = None ):
        '''Intended name ``save_vocabulary``: delegate to the backend model's
        ``save`` and return the tuple of file paths that were written.'''
        UpperCAmelCase : str = self._tokenizer.model.save(snake_case , name=snake_case )
        return tuple(snake_case )
    def A_ ( self , snake_case , snake_case=None ):
        '''Intended name ``build_inputs_with_special_tokens``: wrap one or two
        sequences BART-style, i.e. ``<s> A </s>`` or
        ``<s> A </s> </s> B </s>``.'''
        UpperCAmelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    def A_ ( self , snake_case , snake_case = None ):
        '''Intended name ``create_token_type_ids_from_sequences``: LED, like
        BART, does not use token type ids, so an all-zero list matching the
        length of the special-token-wrapped input is returned.'''
        UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
        UpperCAmelCase : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def A_ ( self , snake_case , snake_case = None , snake_case = PaddingStrategy.DO_NOT_PAD , snake_case = None , snake_case = None , ):
        '''Intended name ``_pad``: run the standard padding, then — when a
        LED-specific ``global_attention_mask`` is present — extend it to the
        padded length on the configured padding side, filling with ``-1``
        because ``0`` in that mask already means "local attention" rather
        than "do not attend".'''
        UpperCAmelCase : int = super()._pad(
            encoded_inputs=snake_case , max_length=snake_case , padding_strategy=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , )
        # Load from model defaults
        if return_attention_mask is None:
            UpperCAmelCase : int = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            UpperCAmelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            UpperCAmelCase : Optional[int] = len(encoded_inputs["global_attention_mask"] ) != len(snake_case )
            if needs_to_be_padded:
                UpperCAmelCase : Tuple = len(snake_case ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    UpperCAmelCase : List[str] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    UpperCAmelCase : Any = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 679 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.