code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
from PIL import Image
def UpperCAmelCase__( img: "Image", level: int ):
    """Return a copy of *img* with its contrast adjusted by *level*.

    Uses the standard contrast-correction factor
    ``259 * (level + 255) / (255 * (259 - level))`` and applies it to every
    channel value via ``Image.point``. ``level`` is expected in [-255, 255];
    0 leaves the image (numerically) unchanged.
    """
    # Contrast correction factor for the per-pixel linear transform.
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Pivot around mid-gray (128) so overall brightness is preserved.
        return int(128 + factor * (c - 128))

    # Image.point maps each channel value through the callable.
    return img.point(contrast)
if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change contrast to 170 and save the result next to the input.
        cont_img = UpperCAmelCase__(img, 170)
        cont_img.save('image_data/lena_high_contrast.png', format='png')
| 186
|
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def A ( tf_checkpoint_path: str , big_bird_config_file: str , pytorch_dump_path: str , is_trivia_qa: bool ) -> None:
    '''Convert a TensorFlow BigBird checkpoint into a saved PyTorch model.

    Args:
        tf_checkpoint_path: Path to the TF checkpoint to load weights from.
        big_bird_config_file: JSON config describing the architecture.
        pytorch_dump_path: Directory where the PyTorch model is written.
        is_trivia_qa: Whether the checkpoint has a TriviaQA (QA) head.
    '''
    # Initialise the PyTorch model skeleton from the json config.
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    # Dispatch to the conversion routine defined above.
    A(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
| 48
| 0
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
snake_case__ : Dict = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( ArgumentHandler ):
    """Handle arguments for zero-shot classification: normalise labels and
    build (sequence, hypothesis) pairs for the NLI model."""

    def _UpperCamelCase ( self , snake_case_ ):
        """Normalise labels: a comma-separated string becomes a list of
        stripped, non-empty label strings; a list is returned unchanged."""
        if isinstance(snake_case_ , str ):
            snake_case_ = [label.strip() for label in snake_case_.split(',' ) if label.strip()]
        return snake_case_

    def __call__( self , sequences , labels , hypothesis_template ):
        """Return (sequence_pairs, sequences) where sequence_pairs holds one
        [sequence, formatted hypothesis] pair per sequence/label combination.

        Raises:
            ValueError: if no sequences/labels are given, or the template
                has no ``{}`` placeholder for the label.
        """
        if len(sequences ) == 0 or len(labels ) == 0:
            raise ValueError('You must include at least one label and at least one sequence.' )
        # If formatting with a label changes nothing, the template lacks {}.
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. '
                    'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
    """NLI-based zero-shot sequence classification pipeline.

    Scores each (sequence, candidate label) pair with an NLI model and turns
    the entailment logits into per-label scores.

    NOTE(review): identifiers in this block were machine-mangled — six
    methods share the name `_UpperCamelCase` (later defs shadow earlier
    ones), several signatures repeat the parameter name `snake_case_`
    (a SyntaxError), and bodies read names (`lowercase__`, `args_parser`,
    `kwargs`, `inputs`, ...) that the mangled signatures never bind.
    Restore the upstream names before use; comments describe intent.
    """

    def __init__( self , snake_case_=ZeroShotClassificationArgumentHandler() , *snake_case_ , **snake_case_ ):
        """Store the argument parser, then warn if no entailment id exists."""
        UpperCAmelCase_ : List[str] = args_parser
        super().__init__(*lowercase__ , **lowercase__ )
        if self.entailment_id == -1:
            logger.warning(
                'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
                '-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' )

    @property
    def _UpperCamelCase ( self ):
        """Id of the first config label starting with 'entail', else -1."""
        # NOTE(review): `labelaid` is presumably a mangled `label2id` — confirm.
        for label, ind in self.model.config.labelaid.items():
            if label.lower().startswith('entail' ):
                return ind
        return -1

    def _UpperCamelCase ( self , snake_case_ , snake_case_=True , snake_case_=True , snake_case_=TruncationStrategy.ONLY_FIRST , **snake_case_ ):
        """Tokenize sequence pairs; if the tokenizer rejects truncation for
        inputs shorter than the target length, retry without truncation."""
        UpperCAmelCase_ : List[Any] = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
                ' `pad_token=eos_token`' )
            UpperCAmelCase_ : Optional[int] = self.tokenizer.eos_token
        try:
            UpperCAmelCase_ : Optional[int] = self.tokenizer(
                lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , padding=lowercase__ , truncation=lowercase__ , )
        except Exception as e:
            if "too short" in str(lowercase__ ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                UpperCAmelCase_ : Optional[Any] = self.tokenizer(
                    lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , padding=lowercase__ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs

    def _UpperCamelCase ( self , **snake_case_ ):
        """Split kwargs into (preprocess_params, forward_params, postprocess_params)."""
        # Legacy alias: `multi_class` was renamed to `multi_label`.
        if kwargs.get('multi_class' , lowercase__ ) is not None:
            UpperCAmelCase_ : Optional[int] = kwargs["multi_class"]
            logger.warning(
                'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
                '`multi_class` will be removed in a future version of Transformers.' )
        UpperCAmelCase_ : Optional[int] = {}
        if "candidate_labels" in kwargs:
            UpperCAmelCase_ : Union[str, Any] = self._args_parser._parse_labels(kwargs['candidate_labels'] )
        if "hypothesis_template" in kwargs:
            UpperCAmelCase_ : int = kwargs["hypothesis_template"]
        UpperCAmelCase_ : List[Any] = {}
        if "multi_label" in kwargs:
            UpperCAmelCase_ : Tuple = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__( self , snake_case_ , *snake_case_ , **snake_case_ , ):
        """Accept candidate_labels either as one extra positional or a kwarg."""
        if len(lowercase__ ) == 0:
            pass
        elif len(lowercase__ ) == 1 and "candidate_labels" not in kwargs:
            UpperCAmelCase_ : Any = args[0]
        else:
            raise ValueError(F'''Unable to understand extra arguments {args}''' )
        return super().__call__(lowercase__ , **lowercase__ )

    def _UpperCamelCase ( self , snake_case_ , snake_case_=None , snake_case_="This example is {}." ):
        """Yield one tokenized model input per (sequence, candidate label) pair."""
        UpperCAmelCase_ : List[Any] = self._args_parser(lowercase__ , lowercase__ , lowercase__ )
        for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase__ , lowercase__ ) ):
            UpperCAmelCase_ : Dict = self._parse_and_tokenize([sequence_pair] )
            # `is_last` tells the ChunkPipeline when a sequence's pairs end.
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(lowercase__ ) - 1,
                **model_input,
            }

    def _UpperCamelCase ( self , snake_case_ ):
        """Run the NLI model on one pair; pass bookkeeping fields through."""
        UpperCAmelCase_ : List[Any] = inputs["candidate_label"]
        UpperCAmelCase_ : Union[str, Any] = inputs["sequence"]
        UpperCAmelCase_ : Any = {k: inputs[k] for k in self.tokenizer.model_input_names}
        UpperCAmelCase_ : List[Any] = self.model(**lowercase__ )
        UpperCAmelCase_ : str = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def _UpperCamelCase ( self , snake_case_ , snake_case_=False ):
        """Turn per-pair logits into ranked per-label scores for one sequence."""
        UpperCAmelCase_ : Tuple = [outputs["candidate_label"] for outputs in model_outputs]
        UpperCAmelCase_ : Tuple = [outputs["sequence"] for outputs in model_outputs]
        UpperCAmelCase_ : Optional[Any] = np.concatenate([output['logits'].numpy() for output in model_outputs] )
        UpperCAmelCase_ : Any = logits.shape[0]
        UpperCAmelCase_ : str = len(lowercase__ )
        UpperCAmelCase_ : str = N // n
        UpperCAmelCase_ : List[str] = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(lowercase__ ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            UpperCAmelCase_ : Optional[Any] = self.entailment_id
            UpperCAmelCase_ : Optional[Any] = -1 if entailment_id == 0 else 0
            UpperCAmelCase_ : str = reshaped_outputs[..., [contradiction_id, entailment_id]]
            UpperCAmelCase_ : Optional[int] = np.exp(lowercase__ ) / np.exp(lowercase__ ).sum(-1 , keepdims=lowercase__ )
            UpperCAmelCase_ : Tuple = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            UpperCAmelCase_ : List[Any] = reshaped_outputs[..., self.entailment_id]
            UpperCAmelCase_ : List[Any] = np.exp(lowercase__ ) / np.exp(lowercase__ ).sum(-1 , keepdims=lowercase__ )
        UpperCAmelCase_ : Dict = list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 702
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
# Map of submodule name -> public names it exposes; consumed by _LazyModule.
_import_structure = {
    'configuration_speecht5': [
        'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
        'SpeechT5Config',
        'SpeechT5HifiGanConfig',
    ],
    'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
    'processing_speecht5': ['SpeechT5Processor'],
}

# Tokenizer requires the sentencepiece extra.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']

# Modeling code requires torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_speecht5'] = [
        'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SpeechT5ForSpeechToText',
        'SpeechT5ForSpeechToSpeech',
        'SpeechT5ForTextToSpeech',
        'SpeechT5Model',
        'SpeechT5PreTrainedModel',
        'SpeechT5HifiGan',
    ]

if TYPE_CHECKING:
    # Eager imports for type checkers only; runtime goes through _LazyModule.
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 389
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Optional[int] = logging.get_logger(__name__)
A : Dict = {
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A (PretrainedConfig ):
    '''Configuration for the GLPN depth-estimation model.

    Stores the encoder/decoder hyper-parameters; defaults follow the
    vinvino02/glpn-kitti checkpoint referenced above.
    '''

    # Identifier used by the transformers auto-classes to resolve this config.
    model_type = 'glpn'

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        # NOTE: list defaults mirror the upstream config; they are read-only
        # here, so the shared-mutable-default pitfall does not bite.
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 176
|
def __lowerCamelCase ( __a :str ) -> bool:
"""simple docstring"""
A__ = 0
for ch in input_str:
A__ = ord(__a )
A__ = pow(2 , __a )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 176
| 1
|
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
    """Adaptive softmax layer with cutoff clusters (Transformer-XL style).

    NOTE(review): identifiers were machine-mangled — every local assignment
    targets `UpperCamelCase`, parameter lists repeat `UpperCamelCase__`
    (a SyntaxError), three methods share the name `A`, and bodies read names
    (`vocab_size`, `target`, `x`, `proj`, `b`, ...) that the mangled
    signatures never bind. Comments below describe the intended behavior.
    """

    def __init__( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : Optional[int]=False , **UpperCamelCase__ : str ):
        """Record vocabulary/cutoff geometry; weights are created in build().

        Upstream signature is presumably (vocab_size, d_embed, d_proj,
        cutoffs, div_val=1, keep_order=False) — TODO confirm.
        """
        super().__init__(**UpperCamelCase__ )
        UpperCamelCase = vocab_size
        UpperCamelCase = d_embed
        UpperCamelCase = d_proj
        # Append vocab_size so the cutoffs span the whole vocabulary.
        UpperCamelCase = cutoffs + [vocab_size]
        UpperCamelCase = [0] + self.cutoffs
        UpperCamelCase = div_val
        UpperCamelCase = self.cutoffs[0]
        UpperCamelCase = len(self.cutoffs ) - 1
        UpperCamelCase = self.shortlist_size + self.n_clusters
        UpperCamelCase = keep_order
        UpperCamelCase = []
        UpperCamelCase = []

    def A ( self : int , UpperCamelCase__ : List[Any] ):
        """Create cluster weights plus per-cutoff projection/output weights."""
        if self.n_clusters > 0:
            UpperCamelCase = self.add_weight(
                shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=UpperCamelCase__ , name='cluster_weight' )
            UpperCamelCase = self.add_weight(
                shape=(self.n_clusters,) , initializer='zeros' , trainable=UpperCamelCase__ , name='cluster_bias' )
        if self.div_val == 1:
            # Single embedding size; optionally project d_embed -> d_proj.
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    UpperCamelCase = self.add_weight(
                        shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=UpperCamelCase__ , name=f"""out_projs_._{i}""" , )
                    self.out_projs.append(UpperCamelCase__ )
                else:
                    self.out_projs.append(UpperCamelCase__ )
                UpperCamelCase = self.add_weight(
                    shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=UpperCamelCase__ , name=f"""out_layers_._{i}_._weight""" , )
                UpperCamelCase = self.add_weight(
                    shape=(self.vocab_size,) , initializer='zeros' , trainable=UpperCamelCase__ , name=f"""out_layers_._{i}_._bias""" , )
                self.out_layers.append((weight, bias) )
        else:
            # Embedding size shrinks by div_val**i for each successive cutoff.
            for i in range(len(self.cutoffs ) ):
                UpperCamelCase , UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                UpperCamelCase = self.d_embed // (self.div_val**i)
                UpperCamelCase = self.add_weight(
                    shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=UpperCamelCase__ , name=f"""out_projs_._{i}""" )
                self.out_projs.append(UpperCamelCase__ )
                UpperCamelCase = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=UpperCamelCase__ , name=f"""out_layers_._{i}_._weight""" , )
                UpperCamelCase = self.add_weight(
                    shape=(r_idx - l_idx,) , initializer='zeros' , trainable=UpperCamelCase__ , name=f"""out_layers_._{i}_._bias""" , )
                self.out_layers.append((weight, bias) )
        super().build(UpperCamelCase__ )

    @staticmethod
    def A ( UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : str=None ):
        """Compute logits x @ W^T + b, optionally projecting x first."""
        UpperCamelCase = x
        if proj is not None:
            UpperCamelCase = tf.einsum('ibd,ed->ibe' , UpperCamelCase__ , UpperCamelCase__ )
        return tf.einsum('ibd,nd->ibn' , UpperCamelCase__ , UpperCamelCase__ ) + b

    @staticmethod
    def A ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ):
        """Gather log-probabilities at the target indices along the last axis."""
        UpperCamelCase = shape_list(UpperCamelCase__ )
        UpperCamelCase = tf.range(lp_size[0] , dtype=target.dtype )
        UpperCamelCase = tf.stack([r, target] , 1 )
        return tf.gather_nd(UpperCamelCase__ , UpperCamelCase__ )

    def A ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Union[str, Any]=False ):
        """Forward pass: log-softmax over the adaptive clusters; when a target
        is given, also computes the NLL loss and registers it via add_loss()."""
        UpperCamelCase = 0
        if self.n_clusters == 0:
            # No clusters: plain softmax over the full vocabulary.
            UpperCamelCase = self._logit(UpperCamelCase__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
            if target is not None:
                UpperCamelCase = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=UpperCamelCase__ , logits=UpperCamelCase__ )
            UpperCamelCase = tf.nn.log_softmax(UpperCamelCase__ , axis=-1 )
        else:
            UpperCamelCase = shape_list(UpperCamelCase__ )
            UpperCamelCase = []
            UpperCamelCase = tf.zeros(hidden_sizes[:2] )
            for i in range(len(self.cutoffs ) ):
                UpperCamelCase , UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    # Mask of targets whose ids fall in this cutoff's range.
                    UpperCamelCase = (target >= l_idx) & (target < r_idx)
                    UpperCamelCase = tf.where(UpperCamelCase__ )
                    UpperCamelCase = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ ) - l_idx
                if self.div_val == 1:
                    UpperCamelCase = self.out_layers[0][0][l_idx:r_idx]
                    UpperCamelCase = self.out_layers[0][1][l_idx:r_idx]
                else:
                    UpperCamelCase = self.out_layers[i][0]
                    UpperCamelCase = self.out_layers[i][1]
                if i == 0:
                    # Head: shortlist tokens plus one logit per tail cluster.
                    UpperCamelCase = tf.concat([cur_W, self.cluster_weight] , 0 )
                    UpperCamelCase = tf.concat([cur_b, self.cluster_bias] , 0 )
                    UpperCamelCase = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[0] )
                    UpperCamelCase = tf.nn.log_softmax(UpperCamelCase__ )
                    out.append(head_logprob[..., : self.cutoffs[0]] )
                    if target is not None:
                        UpperCamelCase = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
                        UpperCamelCase = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
                else:
                    # Tail: add the head's cluster log-prob to the tail logprobs.
                    UpperCamelCase = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[i] )
                    UpperCamelCase = tf.nn.log_softmax(UpperCamelCase__ )
                    UpperCamelCase = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    UpperCamelCase = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(UpperCamelCase__ )
                    if target is not None:
                        UpperCamelCase = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
                        UpperCamelCase = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
                        UpperCamelCase = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    # Scatter this cluster's NLL back into the full loss tensor.
                    loss += tf.scatter_nd(UpperCamelCase__ , -cur_logprob , shape_list(UpperCamelCase__ ) )
            UpperCamelCase = tf.concat(UpperCamelCase__ , axis=-1 )
        if target is not None:
            if return_mean:
                UpperCamelCase = tf.reduce_mean(UpperCamelCase__ )
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(UpperCamelCase__ )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(UpperCamelCase__ , name=self.name , aggregation='mean' if return_mean else '' )
        return out
| 324
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Iris dataset: X holds 4 numeric features per flower, y the class index,
# and `classes` maps class index -> species name.
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

# Random train/test split (default 75/25).
X_train, X_test, y_train, y_test = train_test_split(X, y)
def __lowerCamelCase ( A__ , A__ ) -> int:
"""simple docstring"""
return np.linalg.norm(np.array(A__ ) - np.array(A__ ) )
def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__=5 ) -> Any:
"""simple docstring"""
UpperCamelCase = zip(A__ , A__ )
# List of distances of all points from the point to be classified
UpperCamelCase = []
for data_point in data:
UpperCamelCase = euclidean_distance(data_point[0] , A__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
UpperCamelCase = [i[1] for i in sorted(A__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
UpperCamelCase = Counter(A__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
    # Classify one sample measurement and print the predicted species name.
    print(__lowerCamelCase(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 324
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Map of submodule name -> public names it exposes; consumed by _LazyModule.
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

# The fast tokenizer needs the `tokenizers` library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

# PyTorch modeling code.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

# TensorFlow modeling code.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

# Image processing requires the vision extras (PIL).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]

if TYPE_CHECKING:
    # Eager imports for type checkers only; runtime goes through _LazyModule.
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 532
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
@dataclass
class lowercase :
    """One raw training/evaluation example for token classification.

    NOTE(review): the three identically named fields below were mangled and
    their `42` values look like lost type annotations; upstream this is
    `InputExample` with fields guid / words / labels — confirm before use.
    """

    _SCREAMING_SNAKE_CASE = 42
    _SCREAMING_SNAKE_CASE = 42
    _SCREAMING_SNAKE_CASE = 42
@dataclass
class lowercase :
    """Tokenized features for one example, as fed to the model.

    NOTE(review): field names were mangled; upstream (`InputFeatures`) these
    are input_ids, attention_mask, token_type_ids=None, label_ids=None.
    """

    _SCREAMING_SNAKE_CASE = 42
    _SCREAMING_SNAKE_CASE = 42
    _SCREAMING_SNAKE_CASE = None
    _SCREAMING_SNAKE_CASE = None
class lowercase ( _UpperCAmelCase ):
    """Dataset split names (presumably an Enum upstream, `Split`).

    NOTE(review): the three attributes share one mangled name, so only the
    last assignment ('test') survives — restore train/dev/test members.
    """

    _SCREAMING_SNAKE_CASE = 'train'
    _SCREAMING_SNAKE_CASE = 'dev'
    _SCREAMING_SNAKE_CASE = 'test'
class lowercase :
    """Abstract interface for token-classification (NER-style) tasks.

    NOTE(review): identifiers were machine-mangled — the three static
    methods all share the name `_snake_case` (later defs shadow earlier
    ones) and the feature-conversion signature repeats the parameter name
    `lowercase` (a SyntaxError). Upstream this is TokenClassificationTask
    with read_examples_from_file / get_labels / convert_examples_to_features.
    """

    @staticmethod
    def _snake_case ( lowercase , lowercase ) -> List[InputExample]:
        # Read raw examples from a data file; implemented by subclasses.
        raise NotImplementedError

    @staticmethod
    def _snake_case ( lowercase ) -> List[str]:
        # Return the list of label strings; implemented by subclasses.
        raise NotImplementedError

    @staticmethod
    def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase=False , lowercase="[CLS]" , lowercase=1 , lowercase="[SEP]" , lowercase=False , lowercase=False , lowercase=0 , lowercase=0 , lowercase=-100 , lowercase=0 , lowercase=True , ) -> List[InputFeatures]:
        """Convert word/label examples into padded, tokenized InputFeatures."""
        lowerCAmelCase = {label: i for i, label in enumerate(lowercase )}
        lowerCAmelCase = []
        for ex_index, example in enumerate(lowercase ):
            if ex_index % 10_000 == 0:
                logger.info("""Writing example %d of %d""" , lowercase , len(lowercase ) )
            lowerCAmelCase = []
            lowerCAmelCase = []
            for word, label in zip(example.words , example.labels ):
                lowerCAmelCase = tokenizer.tokenize(lowercase )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(lowercase ) > 0:
                    tokens.extend(lowercase )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(lowercase ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            lowerCAmelCase = tokenizer.num_special_tokens_to_add()
            if len(lowercase ) > max_seq_length - special_tokens_count:
                lowerCAmelCase = tokens[: (max_seq_length - special_tokens_count)]
                lowerCAmelCase = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            lowerCAmelCase = [sequence_a_segment_id] * len(lowercase )
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                lowerCAmelCase = [cls_token] + tokens
                lowerCAmelCase = [pad_token_label_id] + label_ids
                lowerCAmelCase = [cls_token_segment_id] + segment_ids
            lowerCAmelCase = tokenizer.convert_tokens_to_ids(lowercase )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            lowerCAmelCase = [1 if mask_padding_with_zero else 0] * len(lowercase )
            # Zero-pad up to the sequence length.
            lowerCAmelCase = max_seq_length - len(lowercase )
            if pad_on_left:
                lowerCAmelCase = ([pad_token] * padding_length) + input_ids
                lowerCAmelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                lowerCAmelCase = ([pad_token_segment_id] * padding_length) + segment_ids
                lowerCAmelCase = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            # All four feature lists must end up exactly max_seq_length long.
            assert len(lowercase ) == max_seq_length
            assert len(lowercase ) == max_seq_length
            assert len(lowercase ) == max_seq_length
            assert len(lowercase ) == max_seq_length
            if ex_index < 5:
                # Log the first few converted examples for debugging.
                logger.info("""*** Example ***""" )
                logger.info("""guid: %s""" , example.guid )
                logger.info("""tokens: %s""" , """ """.join([str(lowercase ) for x in tokens] ) )
                logger.info("""input_ids: %s""" , """ """.join([str(lowercase ) for x in input_ids] ) )
                logger.info("""input_mask: %s""" , """ """.join([str(lowercase ) for x in input_mask] ) )
                logger.info("""segment_ids: %s""" , """ """.join([str(lowercase ) for x in segment_ids] ) )
                logger.info("""label_ids: %s""" , """ """.join([str(lowercase ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                lowerCAmelCase = None
            features.append(
                InputFeatures(
                    input_ids=lowercase , attention_mask=lowercase , token_type_ids=lowercase , label_ids=lowercase ) )
        return features
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class lowercase ( _UpperCAmelCase ):
        """PyTorch dataset of InputFeatures built from a task data file,
        with on-disk caching of the converted features.

        NOTE(review): class/field names were mangled; upstream this is
        TokenClassificationDataset with `features` and `pad_token_label_id`.
        """

        _SCREAMING_SNAKE_CASE = 42
        # Label id for padding positions; ignored by CrossEntropyLoss.
        _SCREAMING_SNAKE_CASE = nn.CrossEntropyLoss().ignore_index

        def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = None , lowercase=False , lowercase = Split.train , ) -> List[str]:
            # NOTE(review): repeated parameter name `lowercase` is a
            # SyntaxError; body reads data_dir / tokenizer / model_type /
            # mode / overwrite_cache that the signature never binds.
            # Load data features from cache or dataset file
            lowerCAmelCase = os.path.join(
                lowercase , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(lowercase ) ) , )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lowerCAmelCase = cached_features_file + """.lock"""
            with FileLock(lowercase ):
                if os.path.exists(lowercase ) and not overwrite_cache:
                    logger.info(f'Loading features from cached file {cached_features_file}' )
                    lowerCAmelCase = torch.load(lowercase )
                else:
                    logger.info(f'Creating features from dataset file at {data_dir}' )
                    lowerCAmelCase = token_classification_task.read_examples_from_file(lowercase , lowercase )
                    # TODO clean up all this to leverage built-in features of tokenizers
                    lowerCAmelCase = token_classification_task.convert_examples_to_features(
                        lowercase , lowercase , lowercase , lowercase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowercase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                    logger.info(f'Saving features into cached file {cached_features_file}' )
                    torch.save(self.features , lowercase )

        def __len__( self ) -> int:
            return len(self.features )

        def __getitem__( self , lowercase ) -> InputFeatures:
            # NOTE(review): reads `i`, but the mangled parameter is `lowercase`.
            return self.features[i]
if is_tf_available():
    import tensorflow as tf

    class lowercase :
        """TensorFlow dataset wrapper producing (inputs, label_ids) tuples
        via tf.data.Dataset.from_generator.

        NOTE(review): names were mangled; upstream this is
        TFTokenClassificationDataset. `tf.intaa` is presumably a mangled
        `tf.int32` — confirm. Repeated `lowercase` parameters are a
        SyntaxError; bodies read names the signatures never bind.
        """

        _SCREAMING_SNAKE_CASE = 42
        # Label id used for padding positions (ignored in the loss).
        _SCREAMING_SNAKE_CASE = -100

        def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = None , lowercase=False , lowercase = Split.train , ) -> Any:
            lowerCAmelCase = token_classification_task.read_examples_from_file(lowercase , lowercase )
            # TODO clean up all this to leverage built-in features of tokenizers
            lowerCAmelCase = token_classification_task.convert_examples_to_features(
                lowercase , lowercase , lowercase , lowercase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowercase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )

            def gen():
                # Yield (model inputs, label ids) per example for from_generator.
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                lowerCAmelCase = tf.data.Dataset.from_generator(
                    lowercase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
                        {"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
                        tf.TensorShape([None] ),
                    ) , )
            else:
                lowerCAmelCase = tf.data.Dataset.from_generator(
                    lowercase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
                        {
                            """input_ids""": tf.TensorShape([None] ),
                            """attention_mask""": tf.TensorShape([None] ),
                            """token_type_ids""": tf.TensorShape([None] ),
                        },
                        tf.TensorShape([None] ),
                    ) , )

        def _snake_case ( self ) -> List[str]:
            # Pin the dataset cardinality so len()/progress reporting works.
            lowerCAmelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
            return self.dataset

        def __len__( self ) -> Optional[int]:
            return len(self.features )

        def __getitem__( self , lowercase ) -> InputFeatures:
            # NOTE(review): reads `i`, but the mangled parameter is `lowercase`.
            return self.features[i]
| 532
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Dict = logging.get_logger(__name__)
snake_case_ : Optional[Any] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class snake_case_ ( __A ):
    """Configuration class for the OpenAI GPT (``openai-gpt``) model.

    Stores the hyper-parameters used to instantiate the model. Extra keyword
    arguments are forwarded to the base configuration class.
    """

    model_type = "openai-gpt"
    # Map canonical config attribute names onto this model's historical names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=4_0478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ) -> None:
        # Bug fix: the previous signature reused one name for every parameter
        # (a duplicate-argument SyntaxError) and the body bound values to
        # throwaway locals instead of attributes on ``self``. Names and default
        # order are recovered from the original assignment sequence.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 253
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class snake_case_ ( __A ):
    """Processor that bundles an OwlViT image processor with a CLIP tokenizer.

    NOTE(review): this block is machine-mangled — the three class attributes
    all share the name ``lowerCamelCase`` (each assignment clobbers the
    previous), method parameters share the name ``__magic_name__`` (duplicate
    arguments are a SyntaxError), and bodies read readable names (``kwargs``,
    ``text``, ``images``, ``encodings`` ...) that the mangled signatures never
    bind. Restore from the original before relying on it.
    """

    # Presumably: attributes list, image_processor_class, tokenizer_class.
    lowerCamelCase = ["image_processor", "tokenizer"]
    lowerCamelCase = "OwlViTImageProcessor"
    lowerCamelCase = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__( self : Any , __magic_name__ : str=None , __magic_name__ : Tuple=None , **__magic_name__ : Optional[int] ) -> List[str]:
        # Accept the deprecated `feature_extractor` kwarg as an alias for `image_processor`.
        lowerCamelCase_ : int = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , __magic_name__ , )
            lowerCamelCase_ : Dict = kwargs.pop("feature_extractor" )
        lowerCamelCase_ : str = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(__magic_name__ , __magic_name__ )

    def __call__( self : int , __magic_name__ : str=None , __magic_name__ : Tuple=None , __magic_name__ : List[str]=None , __magic_name__ : int="max_length" , __magic_name__ : int="np" , **__magic_name__ : Tuple ) -> Union[str, Any]:
        # Tokenize text (string, list, or nested list) and/or preprocess
        # query images and target images, returning one BatchEncoding.
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )
        if text is not None:
            if isinstance(__magic_name__ , __magic_name__ ) or (isinstance(__magic_name__ , __magic_name__ ) and not isinstance(text[0] , __magic_name__ )):
                lowerCamelCase_ : List[Any] = [self.tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )]
            elif isinstance(__magic_name__ , __magic_name__ ) and isinstance(text[0] , __magic_name__ ):
                lowerCamelCase_ : str = []
                # Maximum number of queries across batch
                lowerCamelCase_ : Union[str, Any] = max([len(__magic_name__ ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(__magic_name__ ) != max_num_queries:
                        lowerCamelCase_ : List[Any] = t + [" "] * (max_num_queries - len(__magic_name__ ))
                    lowerCamelCase_ : Dict = self.tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
                    encodings.append(__magic_name__ )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
            # Concatenate the per-sample encodings into one batch for the
            # requested tensor framework.
            if return_tensors == "np":
                lowerCamelCase_ : Any = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                lowerCamelCase_ : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                lowerCamelCase_ : List[str] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                lowerCamelCase_ : List[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch

                lowerCamelCase_ : int = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                lowerCamelCase_ : List[str] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                lowerCamelCase_ : Union[str, Any] = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                lowerCamelCase_ : List[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("Target return tensor type could not be returned" )
            lowerCamelCase_ : Dict = BatchEncoding()
            lowerCamelCase_ : Dict = input_ids
            lowerCamelCase_ : Optional[int] = attention_mask
        if query_images is not None:
            lowerCamelCase_ : Tuple = BatchEncoding()
            lowerCamelCase_ : List[Any] = self.image_processor(
                __magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ).pixel_values
            lowerCamelCase_ : Optional[int] = query_pixel_values
        if images is not None:
            lowerCamelCase_ : Tuple = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
        # Merge pixel values into the text encoding when both were supplied.
        if text is not None and images is not None:
            lowerCamelCase_ : Optional[int] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            lowerCamelCase_ : Tuple = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : Any , *__magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ) -> Any:
        # Forward to the image processor's post-processing.
        return self.image_processor.post_process(*__magic_name__ , **__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : Any , *__magic_name__ : str , **__magic_name__ : Union[str, Any] ) -> Optional[int]:
        # Forward to object-detection post-processing.
        return self.image_processor.post_process_object_detection(*__magic_name__ , **__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : int , *__magic_name__ : Tuple , **__magic_name__ : List[Any] ) -> Union[str, Any]:
        # Forward to image-guided-detection post-processing.
        return self.image_processor.post_process_image_guided_detection(*__magic_name__ , **__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : Tuple , *__magic_name__ : List[str] , **__magic_name__ : List[Any] ) -> Union[str, Any]:
        # Delegate batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *__magic_name__ : Any , **__magic_name__ : int ) -> int:
        # Delegate decoding to the tokenizer.
        return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )

    @property
    def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __magic_name__ , )
        return self.image_processor_class

    @property
    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __magic_name__ , )
        return self.image_processor
| 253
| 1
|
'''simple docstring'''
def lowercase__( limit: int = 1_00_00_00 ):
    """Return the sum of Euler's totient phi(k) for 2 <= k <= ``limit``.

    Uses a sieve: start with phi[k] = k - 1; for each prime i, subtract
    phi[j] // i from every multiple j of i.

    Bug fixes: the parameter was mangled so the body read an undefined name
    ``limit``, and the inner sieve stepped by the function argument instead
    of by the prime ``i``.
    """
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: phi[i] was untouched
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
| 28
|
"""simple docstring"""
def _lowerCamelCase( sentence ):
    """Return ``sentence`` with every word longer than four characters reversed.

    Bug fixes: the mangled parameter left ``sentence`` undefined in the body,
    and the length test was applied to the whole input instead of each word.
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 528
| 0
|
from math import pi, sqrt, tan
def _snake_case(side_length) -> float:
    """Return the surface area of a cube (6 * a^2).

    Raises:
        ValueError: if ``side_length`` is negative.

    Bug fix: the mangled parameter name left ``side_length`` undefined.
    """
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values" )
    return 6 * side_length**2
def _snake_case(length, breadth, height) -> float:
    """Return the surface area of a cuboid: 2(lb + bh + lh).

    Raises:
        ValueError: if any dimension is negative.

    Bug fix: all three parameters shared one mangled name (a SyntaxError) and
    the body read unbound names.
    """
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values" )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _snake_case(radius) -> float:
    """Return the surface area of a sphere: 4 * pi * r^2.

    Raises:
        ValueError: if ``radius`` is negative.

    Bug fix: the mangled parameter name left ``radius`` undefined.
    """
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values" )
    return 4 * pi * radius**2
def _snake_case(radius) -> float:
    """Return the total surface area of a hemisphere: 3 * pi * r^2.

    Raises:
        ValueError: if ``radius`` is negative.

    Bug fix: the mangled parameter name left ``radius`` undefined.
    """
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
    return 3 * pi * radius**2
def _snake_case(radius, height) -> float:
    """Return the total surface area of a right cone: pi*r*(r + slant).

    Raises:
        ValueError: if ``radius`` or ``height`` is negative.

    Bug fix: both parameters shared one mangled name (a SyntaxError) and the
    body read unbound names.
    """
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values" )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _snake_case(radius_1, radius_2, height) -> float:
    """Return the total surface area of a conical frustum.

    Formula: pi * (slant * (r1 + r2) + r1^2 + r2^2), where
    slant = sqrt(h^2 + (r1 - r2)^2).

    Raises:
        ValueError: if any argument is negative.

    Bug fix: all three parameters shared one mangled name (a SyntaxError) and
    the two radii were collapsed into a single name in the body.
    """
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values" )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def _snake_case(radius, height) -> float:
    """Return the total surface area of a cylinder: 2*pi*r*(h + r).

    Raises:
        ValueError: if ``radius`` or ``height`` is negative.

    Bug fix: both parameters shared one mangled name (a SyntaxError) and the
    body read unbound names.
    """
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values" )
    return 2 * pi * radius * (height + radius)
def _snake_case(torus_radius, tube_radius) -> float:
    """Return the surface area of a (ring) torus: 4 * pi^2 * R * r.

    Raises:
        ValueError: if either radius is negative, or if the torus would be
            spindle/self-intersecting (R < r).

    Bug fixes: both parameters shared one mangled name (a SyntaxError), and
    ``pow`` was applied to the parameter where the constant ``pi`` belongs.
    """
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values" )
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori" )
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def _snake_case(length, width) -> float:
    """Return the area of a rectangle.

    Raises:
        ValueError: if ``length`` or ``width`` is negative.

    Bug fix: both parameters shared one mangled name (a SyntaxError) and the
    body read unbound names.
    """
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values" )
    return length * width
def _snake_case(side_length) -> float:
    """Return the area of a square.

    Raises:
        ValueError: if ``side_length`` is negative.

    Bug fix: the mangled parameter name left ``side_length`` undefined.
    """
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values" )
    return side_length**2
def _snake_case(base, height) -> float:
    """Return the area of a triangle from base and height: b*h/2.

    Raises:
        ValueError: if ``base`` or ``height`` is negative.

    Bug fix: both parameters shared one mangled name (a SyntaxError) and the
    body read unbound names.
    """
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values" )
    return (base * height) / 2
def _snake_case(side_1, side_2, side_3) -> float:
    """Return the area of a triangle from its three sides (Heron's formula).

    Raises:
        ValueError: if any side is negative, or if the sides violate the
            triangle inequality.

    Bug fix: the three side parameters were collapsed into one mangled name
    (a SyntaxError), which also wiped out the triangle-inequality checks.
    """
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle" )
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
def _snake_case(base, height) -> float:
    """Return the area of a parallelogram: b*h.

    Raises:
        ValueError: if ``base`` or ``height`` is negative.

    Bug fix: both parameters shared one mangled name (a SyntaxError) and the
    body read unbound names.
    """
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values" )
    return base * height
def _snake_case(base_1, base_2, height) -> float:
    """Return the area of a trapezium: (b1 + b2) * h / 2.

    Raises:
        ValueError: if any argument is negative.

    Bug fix: the two base parameters were collapsed into one mangled name
    (a SyntaxError) and the body read unbound names.
    """
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values" )
    return 1 / 2 * (base_1 + base_2) * height
def _snake_case(radius) -> float:
    """Return the area of a circle: pi * r^2.

    Raises:
        ValueError: if ``radius`` is negative.

    Bug fix: the mangled parameter name left ``radius`` undefined.
    """
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values" )
    return pi * radius**2
def _snake_case(radius_x, radius_y) -> float:
    """Return the area of an ellipse: pi * rx * ry.

    Raises:
        ValueError: if either semi-axis is negative.

    Bug fix: both parameters shared one mangled name (a SyntaxError) and the
    body read unbound names.
    """
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values" )
    return pi * radius_x * radius_y
def _snake_case(diagonal_1, diagonal_2) -> float:
    """Return the area of a rhombus from its diagonals: d1 * d2 / 2.

    Raises:
        ValueError: if either diagonal is negative.

    Bug fix: the two diagonal parameters were collapsed into one mangled name
    (a SyntaxError) and the product degenerated into a square of one value.
    """
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values" )
    return 1 / 2 * diagonal_1 * diagonal_2
def _snake_case(sides, length) -> float:
    """Return the area of a regular polygon with ``sides`` sides of ``length``.

    Formula: (n * a^2) / (4 * tan(pi / n)).

    Raises:
        ValueError: if ``sides`` is not an integer >= 3, or ``length`` is
            negative.

    Bug fixes: both parameters shared one mangled name (a SyntaxError), the
    isinstance check compared the value against itself instead of ``int``,
    and an unreachable duplicated ``return`` statement is removed.
    """
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
                equal to three as number of sides" )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
                length of a side" )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f'''Rectangle: {area_rectangle(1_0, 2_0) = }''')
print(f'''Square: {area_square(1_0) = }''')
print(f'''Triangle: {area_triangle(1_0, 1_0) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }''')
print(f'''Parallelogram: {area_parallelogram(1_0, 2_0) = }''')
print(f'''Rhombus: {area_rhombus(1_0, 2_0) = }''')
print(f'''Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }''')
print(f'''Circle: {area_circle(2_0) = }''')
print(f'''Ellipse: {area_ellipse(1_0, 2_0) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(f'''Cube: {surface_area_cube(2_0) = }''')
print(f'''Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }''')
print(f'''Sphere: {surface_area_sphere(2_0) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(2_0) = }''')
print(f'''Cone: {surface_area_cone(1_0, 2_0) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }''')
print(f'''Cylinder: {surface_area_cylinder(1_0, 2_0) = }''')
print(f'''Torus: {surface_area_torus(2_0, 1_0) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 1_0) = }''')
print(f'''Square: {area_reg_polygon(4, 1_0) = }''')
print(f'''Reqular Pentagon: {area_reg_polygon(5, 1_0) = }''')
| 703
|
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class A__ ( ChineseCLIPImageProcessor ):
    """Deprecated alias for ``ChineseCLIPImageProcessor`` (removal in v5).

    Bug fixes: the base class was the undefined name ``A`` (the required
    ``ChineseCLIPImageProcessor`` is imported at the top of this file), and
    ``__init__`` used one mangled name for both *args and **kwargs
    (a SyntaxError) that also clobbered the warning's category argument.
    """

    def __init__( self , *args , **kwargs ):
        # Emit the deprecation warning, then delegate construction to the
        # real image processor. FutureWarning is the usual category for
        # these shims — confirm against the original.
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 503
| 0
|
import logging
import os
from .state import PartialState
class lowerCamelCase__ ( logging.LoggerAdapter):
    """Logger adapter that can restrict records to the main process.

    Bug fixes: both methods were collapsed onto one mangled name (so the body's
    calls to ``self._should_log`` resolved to nothing) and ``log`` reused a
    single parameter name for every argument (a SyntaxError). Names and the
    ``main_process_only=True`` / ``in_order=False`` defaults follow the
    Accelerate API — confirm against upstream.
    """

    @staticmethod
    def _should_log(main_process_only):
        # Emit on every process unless main_process_only restricts us to rank 0.
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """Log ``msg``; honors ``main_process_only`` and ``in_order`` kwargs."""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                # Emit once per process, in rank order, with a barrier between ranks.
                state = PartialState()
                for rank in range(state.num_processes):
                    if rank == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def _SCREAMING_SNAKE_CASE ( name : str , log_level : str = None) -> Union[str, Any]:
    """Return a multi-process-aware adapter around ``logging.getLogger(name)``.

    ``log_level`` falls back to the ``ACCELERATE_LOG_LEVEL`` environment
    variable when not given.

    Bug fixes: both parameters shared one mangled name (a SyntaxError) and the
    return statement referenced the undefined name ``MultiProcessAdapter`` —
    the adapter class defined above in this file is ``lowerCamelCase__``.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL" , None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return lowerCamelCase__(logger , {})
| 557
|
from __future__ import annotations
# NOTE(review): mangled — every constant below is bound to the single name
# ``lowercase`` (each assignment clobbers the previous), so only the last
# value survives at runtime. Presumably these were distinct names
# (abc, rotor1..rotor9, etc.) in the original — restore before use.
lowercase : Dict = tuple[int, int, int]
lowercase : List[str] = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
lowercase : int = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
lowercase : Optional[int] = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
lowercase : Optional[Any] = 'FOBHMDKEXQNRAULPGSJVTYICZW'
lowercase : str = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
# Symmetric letter-pair mapping: the reflector maps each letter to its
# partner and back, which is what makes encryption self-inverse.
lowercase : Tuple = {
    'A': 'N',
    'N': 'A',
    'B': 'O',
    'O': 'B',
    'C': 'P',
    'P': 'C',
    'D': 'Q',
    'Q': 'D',
    'E': 'R',
    'R': 'E',
    'F': 'S',
    'S': 'F',
    'G': 'T',
    'T': 'G',
    'H': 'U',
    'U': 'H',
    'I': 'V',
    'V': 'I',
    'J': 'W',
    'W': 'J',
    'K': 'X',
    'X': 'K',
    'L': 'Y',
    'Y': 'L',
    'M': 'Z',
    'Z': 'M',
}
# -------------------------- extra rotors --------------------------
lowercase : Optional[int] = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
lowercase : List[Any] = 'SGLCPQWZHKXAREONTFBVIYJUDM'
lowercase : Dict = 'HVSICLTYKQUBXDWAJZOMFGPREN'
lowercase : List[Any] = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
lowercase : Optional[Any] = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
lowercase : Tuple = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : RotorPositionT , _lowerCamelCase : RotorSelectionT , _lowerCamelCase : str) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Validate rotor positions/selection and build the plugboard dict.

    NOTE(review): mangled — all three parameters share one name (a
    duplicate-argument SyntaxError); the body reads unbound names
    (``rotpos``, ``rotsel``) and collapses three distinct rotor positions
    onto the single name ``rotorposa``. The called ``_plugboard`` is also
    undefined under this file's mangled function names. Restore originals.
    """
    # Require three distinct rotors.
    if (unique_rotsel := len(set(_lowerCamelCase))) < 3:
        __UpperCamelCase : Tuple = F'Please use 3 unique rotors (not {unique_rotsel})'
        raise Exception(_lowerCamelCase)
    # Checks if rotor positions are valid
    __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : str = rotpos
    if not 0 < rotorposa <= len(_lowerCamelCase):
        __UpperCamelCase : Union[str, Any] = F'First rotor position is not within range of 1..26 ({rotorposa}'
        raise ValueError(_lowerCamelCase)
    if not 0 < rotorposa <= len(_lowerCamelCase):
        __UpperCamelCase : int = F'Second rotor position is not within range of 1..26 ({rotorposa})'
        raise ValueError(_lowerCamelCase)
    if not 0 < rotorposa <= len(_lowerCamelCase):
        __UpperCamelCase : List[Any] = F'Third rotor position is not within range of 1..26 ({rotorposa})'
        raise ValueError(_lowerCamelCase)
    # Validates string and returns dict
    __UpperCamelCase : str = _plugboard(_lowerCamelCase)
    return rotpos, rotsel, pbdict
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str) -> dict[str, str]:
    """Build a symmetric plugboard mapping from a string of letter pairs.

    NOTE(review): mangled — the body mixes the parameter with the unbound
    name ``pbstring``, the isinstance check compares the value with itself
    (presumably ``isinstance(pbstring, str)``), ``abc`` is undefined under
    this file's collapsed constant names, and ``.replace`` discards its
    result. Restore from the original before use.
    """
    if not isinstance(_lowerCamelCase , _lowerCamelCase):
        __UpperCamelCase : Union[str, Any] = F'Plugboard setting isn\'t type string ({type(_lowerCamelCase)})'
        raise TypeError(_lowerCamelCase)
    elif len(_lowerCamelCase) % 2 != 0:
        # Pairs only: an odd-length setting cannot be split into letter pairs.
        __UpperCamelCase : int = F'Odd number of symbols ({len(_lowerCamelCase)})'
        raise Exception(_lowerCamelCase)
    elif pbstring == "":
        return {}
    pbstring.replace(" " , "")
    # Checks if all characters are unique
    __UpperCamelCase : Optional[int] = set()
    for i in pbstring:
        if i not in abc:
            __UpperCamelCase : Tuple = F'\'{i}\' not in list of symbols'
            raise Exception(_lowerCamelCase)
        elif i in tmppbl:
            __UpperCamelCase : Tuple = F'Duplicate symbol ({i})'
            raise Exception(_lowerCamelCase)
        else:
            tmppbl.add(_lowerCamelCase)
    del tmppbl
    # Created the dictionary
    __UpperCamelCase : Union[str, Any] = {}
    for j in range(0 , len(_lowerCamelCase) - 1 , 2):
        __UpperCamelCase : Union[str, Any] = pbstring[j + 1]
        __UpperCamelCase : str = pbstring[j]
    return pb
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : RotorPositionT , _lowerCamelCase : RotorSelectionT = (rotora, rotora, rotora) , _lowerCamelCase : str = "" , ) -> str:
'''simple docstring'''
__UpperCamelCase : List[Any] = text.upper()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[int] = _validator(
_lowerCamelCase , _lowerCamelCase , plugb.upper())
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Tuple = rotor_position
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[Any] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
__UpperCamelCase : List[str] = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
__UpperCamelCase : Dict = plugboard[symbol]
# rotor ra --------------------------
__UpperCamelCase : Optional[int] = abc.index(_lowerCamelCase) + rotorposa
__UpperCamelCase : List[Any] = rotora[index % len(_lowerCamelCase)]
# rotor rb --------------------------
__UpperCamelCase : Dict = abc.index(_lowerCamelCase) + rotorposa
__UpperCamelCase : Any = rotora[index % len(_lowerCamelCase)]
# rotor rc --------------------------
__UpperCamelCase : str = abc.index(_lowerCamelCase) + rotorposa
__UpperCamelCase : Union[str, Any] = rotora[index % len(_lowerCamelCase)]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
__UpperCamelCase : Union[str, Any] = reflector[symbol]
# 2nd rotors
__UpperCamelCase : Optional[int] = abc[rotora.index(_lowerCamelCase) - rotorposa]
__UpperCamelCase : Optional[Any] = abc[rotora.index(_lowerCamelCase) - rotorposa]
__UpperCamelCase : str = abc[rotora.index(_lowerCamelCase) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
__UpperCamelCase : Optional[Any] = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_lowerCamelCase):
__UpperCamelCase : Any = 0
rotorposa += 1
if rotorposa >= len(_lowerCamelCase):
__UpperCamelCase : Tuple = 0
rotorposa += 1
if rotorposa >= len(_lowerCamelCase):
__UpperCamelCase : List[str] = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_lowerCamelCase)
return "".join(_lowerCamelCase)
if __name__ == "__main__":
lowercase : Optional[Any] = 'This is my Python script that emulates the Enigma machine from WWII.'
lowercase : int = (1, 1, 1)
lowercase : Optional[Any] = 'pictures'
lowercase : Optional[Any] = (rotora, rotora, rotora)
lowercase : Optional[Any] = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 557
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623
| 1
|
"""simple docstring"""
import os
import sys
import transformers
SCREAMING_SNAKE_CASE__ = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 532
|
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def __lowerCamelCase ( sentence ) -> str:
    """Return ``sentence`` with its first character uppercased via an explicit
    lowercase→uppercase map; non-lowercase first characters pass through.

    Bug fixes: the mangled parameter left ``sentence`` undefined in the body,
    and the mapping zipped the argument with itself instead of pairing
    ``ascii_lowercase`` with ``ascii_uppercase`` (imported at the top of this
    file).
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 430
| 0
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
# NOTE(review): this test class is machine-obfuscated and broken as written:
#   * the base class `__lowercase` is not defined anywhere in this file
#     (presumably the original TokenizerTesterMixin) -- confirm before running;
#   * all four class attributes share the single name `_SCREAMING_SNAKE_CASE`,
#     so each assignment overwrites the previous one;
#   * every test method is named `lowerCAmelCase`, so only the last definition
#     survives on the class;
#   * locals are bound to `__a` but read back under their original names
#     (`tokenizer`, `vocab_keys`, `batch`, ...), which raises NameError;
#   * `snake_case_` is used as an argument everywhere but is never defined.
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class UpperCamelCase__ ( __lowercase ,unittest.TestCase ):
    """Tokenizer tests for the moussaKam/mbarthez checkpoint (slow and fast tokenizers)."""

    _SCREAMING_SNAKE_CASE : List[str] = BarthezTokenizer  # slow tokenizer class under test
    _SCREAMING_SNAKE_CASE : Any = BarthezTokenizerFast  # fast (rust) tokenizer class
    _SCREAMING_SNAKE_CASE : Optional[Any] = True
    _SCREAMING_SNAKE_CASE : Tuple = True

    def lowerCAmelCase (self : List[str] ):
        # setUp: fetch the pretrained fast tokenizer and save it into the
        # temporary test directory in both the new and legacy formats.
        super().setUp()
        __a : Optional[Any] = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ )
        __a : Tuple = tokenizer

    def lowerCAmelCase (self : int ):
        # '<pad>' must round-trip through token<->id conversion as id 1.
        __a : Any = '''<pad>'''
        __a : List[str] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )

    def lowerCAmelCase (self : int ):
        # First/last vocabulary entries and the total vocabulary size (101122).
        __a : str = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-1] , '''<mask>''' )
        self.assertEqual(len(snake_case_ ) , 1_0_1_1_2_2 )

    def lowerCAmelCase (self : Optional[Any] ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 )

    @require_torch
    def lowerCAmelCase (self : Any ):
        # Batch-encode two sentences to PyTorch tensors; check shapes and that
        # the first row matches the pinned expected ids.
        __a : Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        __a : Optional[int] = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
        __a : Optional[Any] = self.tokenizer(
            snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors='''pt''' )
        self.assertIsInstance(snake_case_ , snake_case_ )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        __a : str = batch.input_ids.tolist()[0]
        self.assertListEqual(snake_case_ , snake_case_ )

    def lowerCAmelCase (self : Union[str, Any] ):
        # Slow and fast tokenizers must agree on tokenize() and encode(),
        # both with and without special tokens.
        if not self.test_rust_tokenizer:
            return
        __a : Any = self.get_tokenizer()
        __a : Dict = self.get_rust_tokenizer()
        __a : str = '''I was born in 92000, and this is falsé.'''
        __a : Union[str, Any] = tokenizer.tokenize(snake_case_ )
        __a : int = rust_tokenizer.tokenize(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )
        __a : str = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
        __a : List[Any] = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )
        __a : Optional[int] = self.get_rust_tokenizer()
        __a : Optional[int] = tokenizer.encode(snake_case_ )
        __a : Any = rust_tokenizer.encode(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )

    @slow
    def lowerCAmelCase (self : int ):
        # Integration test: encodings of two French sentences must match the
        # pinned ids/masks for the given revision of moussaKam/mbarthez.
        # fmt: off
        __a : List[str] = {'''input_ids''': [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        __a : int = [
            '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
            '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
            '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
            '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
            '''telles que la traduction et la synthèse de texte.''',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_ , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=snake_case_ , )
| 702
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import map for the CTRL model package.
# FIX: the original assigned every structure to a throwaway name (`lowercase__`),
# so `_import_structure` was undefined at module load, the optional-backend
# lists overwrote the base config dict instead of extending it, and the
# resulting _LazyModule was never installed into sys.modules.
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

# PyTorch models are only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

# TensorFlow models are only importable when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326
| 0
|
"""simple docstring"""
from __future__ import annotations
_lowercase = 1.6021e-19 # units = C
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
raise ValueError('mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 91
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def a ():
    """Parse the command-line arguments for the code-complexity fine-tuning run.

    FIX: the original bound the ArgumentParser to a throwaway name (`__a`) and
    then called `parser.add_argument` on an undefined name, and every `type=`
    was the undefined `lowerCAmelCase__`; real types are restored here.

    Returns:
        argparse.Namespace with the training hyper-parameters.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("""--model_ckpt""", type=str, default="""microsoft/unixcoder-base-nine""")
    parser.add_argument("""--num_epochs""", type=int, default=5)
    parser.add_argument("""--batch_size""", type=int, default=6)
    parser.add_argument("""--gradient_accumulation_steps""", type=int, default=1)
    # presumably a flag defaulting to "freeze the encoder" -- TODO confirm default
    parser.add_argument("""--freeze""", type=bool, default=True)
    parser.add_argument("""--learning_rate""", type=float, default=5E-4)
    parser.add_argument("""--seed""", type=int, default=0)
    parser.add_argument("""--lr_scheduler_type""", type=str, default="""cosine""")
    parser.add_argument("""--num_warmup_steps""", type=int, default=10)
    parser.add_argument("""--weight_decay""", type=float, default=0.0_1)
    parser.add_argument("""--output_dir""", type=str, default="""./results""")
    return parser.parse_args()
SCREAMING_SNAKE_CASE = load('accuracy')
def a (lowerCAmelCase__ ):
    """Trainer `compute_metrics` hook: accuracy from an EvalPrediction pair.

    FIX: the original unpacked the undefined name `eval_pred` (the parameter
    is `lowerCAmelCase__`) into two identical targets, then passed the raw
    parameter to np.argmax -- every name is now consistent.

    Args:
        lowerCAmelCase__: a ``(logits, labels)`` pair as supplied by Trainer.

    Returns:
        dict produced by the module-level ``metric`` (accuracy).
    """
    logits, labels = lowerCAmelCase__
    # Class prediction = argmax over the label dimension.
    predictions = np.argmax(logits, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class __UpperCAmelCase ( TrainerCallback ):
    """Callback that re-runs evaluation on the *training* split whenever the
    Trainer evaluates, logging those metrics under the ``train`` prefix.

    FIX: the original subclassed the undefined name `__A` (restored to
    TrainerCallback, imported at the top of this file), declared duplicate
    parameter names (a SyntaxError), and never actually stored the trainer
    on `self`.
    """

    def __init__( self , __A ):
        super().__init__()
        # Trainer instance whose evaluate() we re-invoke on the train split.
        self._trainer = __A

    def snake_case_ ( self , args , state , control , **kwargs ):
        """Evaluation hook: when an evaluation is due, evaluate on the train set.

        NOTE(review): for Trainer to invoke this automatically it must be named
        `on_evaluate`; the obfuscated name is kept to preserve the interface --
        confirm the intended hook name against the caller.
        """
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" )
            return control_copy
def a ():
    """Fine-tune a sequence classifier on codeparrot/codecomplex (7 classes).

    NOTE(review): obfuscation broke this function -- every local is bound to
    `__a` but read back under its original name (`args`, `dataset`,
    `train_test`, `tokenizer`, `model`, `labels`, `trainer`, ...), so nearly
    every statement raises NameError; `get_args` and `CustomCallback` do not
    exist under those names in this file; and `labels.straint` is presumably
    a mangled `ClassLabel.str2int` -- confirm against the datasets API.
    """
    __a = get_args()
    set_seed(args.seed )
    # 80/10/10 train/valid/test split of the codecomplex train split.
    __a = load_dataset("""codeparrot/codecomplex""" , split="""train""" )
    __a = dataset.train_test_split(test_size=0.2 )
    __a = train_test["""test"""].train_test_split(test_size=0.5 )
    __a = DatasetDict(
        {
            """train""": train_test["""train"""],
            """test""": test_validation["""train"""],
            """valid""": test_validation["""test"""],
        } )
    print("""Loading tokenizer and model""" )
    __a = AutoTokenizer.from_pretrained(args.model_ckpt )
    __a = tokenizer.eos_token
    __a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    __a = model.config.eos_token_id
    if args.freeze:
        # Freeze the encoder; only the classification head trains.
        for param in model.roberta.parameters():
            __a = False
    # Map complexity-class strings to integer labels.
    __a = ClassLabel(num_classes=7 , names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) )

    def tokenize(lowerCAmelCase__ ):
        # Tokenize the source code and attach the integer complexity label.
        __a = tokenizer(example["""src"""] , truncation=lowerCAmelCase__ , max_length=1_024 )
        __a = labels.straint(example["""complexity"""] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    __a = train_test_validation.map(
        lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=train_test_validation["""train"""].column_names , )
    __a = DataCollatorWithPadding(tokenizer=lowerCAmelCase__ )
    __a = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="""epoch""" , save_strategy="""epoch""" , logging_strategy="""epoch""" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.0_1 , metric_for_best_model="""accuracy""" , run_name="""complexity-java""" , report_to="""wandb""" , )
    __a = Trainer(
        model=lowerCAmelCase__ , args=lowerCAmelCase__ , train_dataset=tokenized_datasets["""train"""] , eval_dataset=tokenized_datasets["""valid"""] , tokenizer=lowerCAmelCase__ , data_collator=lowerCAmelCase__ , compute_metrics=lowerCAmelCase__ , )
    print("""Training...""" )
    trainer.add_callback(CustomCallback(lowerCAmelCase__ ) )
    trainer.train()


if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this file (the entry point above
    # is obfuscated to `a`) -- this raises NameError as written.
    main()
| 99
| 0
|
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowerCamelCase__ ( __lowerCamelCase : dict ):
    """Split an sklearn-style bunch/mapping into a (features, targets) pair."""
    features = __lowerCamelCase["data"]
    targets = __lowerCamelCase["target"]
    return (features, targets)
def lowerCamelCase__ ( features : np.ndarray , target : np.ndarray , test_features : np.ndarray ):
    """Fit an XGBoost regressor on (features, target) and predict for test_features.

    FIX: the original declared all three parameters under the same obfuscated
    name `__lowerCamelCase`, which is a SyntaxError.

    Returns:
        np.ndarray of shape ``(len(test_features), 1)`` -- predictions as a
        column vector.
    """
    # Fixed seed for reproducibility; verbosity=0 silences the native logs.
    xgb = XGBRegressor(verbosity=0 , random_state=4_2 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def lowerCamelCase__ ( ):
    """Train/evaluate an XGBoost regressor on the California housing dataset.

    NOTE(review): obfuscation broke this function -- results are bound to
    `_UpperCAmelCase` but never read back; `data_handling` and `xgboost` do
    not exist under those names in this file (the helpers above are all named
    `lowerCamelCase__`); and the f-strings reference the undefined
    `__lowerCamelCase`. Every statement after the first raises NameError.
    """
    _UpperCAmelCase : str =fetch_california_housing()
    _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] =data_handling(__lowerCamelCase )
    _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] =train_test_split(
        __lowerCamelCase , __lowerCamelCase , test_size=0.25 , random_state=1 )
    _UpperCAmelCase : List[str] =xgboost(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(__lowerCamelCase , __lowerCamelCase )}" )
    print(f"Mean Square Error : {mean_squared_error(__lowerCamelCase , __lowerCamelCase )}" )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    # NOTE(review): `main` is not defined in this file -- NameError as written.
    main()
| 331
|
'''simple docstring'''
from __future__ import annotations
lowercase ='Muhammad Umer Farooq'
lowercase ='MIT'
lowercase ='1.0.0'
lowercase ='Muhammad Umer Farooq'
lowercase ='contact@muhammadumerfarooq.me'
lowercase ='Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __magic_name__ ( lowerCAmelCase ):
def __init__( self , snake_case) -> None:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : list[str] =[]
_UpperCAmelCase : List[Any] =domain
def lowerCAmelCase ( self , snake_case , snake_case) -> None:
'''simple docstring'''
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
_UpperCAmelCase : Optional[int] =parse.urljoin(self.domain , snake_case)
self.urls.append(snake_case)
def lowerCamelCase__ ( __lowerCamelCase : str ):
    """Return the registrable domain (last two host labels) of a URL.

    e.g. ``"https://a.b.github.com/x"`` -> ``"github.com"``.

    FIX: the original called `get_sub_domain_name`, a name that does not exist
    in this file (the helper below is obfuscated to `lowerCamelCase__`); the
    netloc extraction is inlined here so the function is self-contained.
    """
    return ".".join(parse.urlparse(__lowerCamelCase ).netloc.split('.' )[-2:] )
def lowerCamelCase__ ( __lowerCamelCase : str ):
    """Return the network location (host[:port]) component of a URL."""
    parsed = parse.urlparse(__lowerCamelCase )
    return parsed.netloc
def lowerCamelCase__ ( __lowerCamelCase : str = "https://github.com" ):
    """Crawl the links on *__lowerCamelCase* and collect email addresses found
    on the linked pages.

    NOTE(review): obfuscation broke this function -- results are bound to
    `_UpperCAmelCase` but read back as `parser`, `r`, `read`, `emails`,
    `valid_emails` and `domain` (all NameErrors); `get_domain_name` and
    `Parser` do not exist under those names in this file; and the final
    `sorted(__lowerCamelCase)` sorts the input URL's characters rather than
    the collected emails. Performs live network I/O via `requests`.
    """
    _UpperCAmelCase : Optional[Any] =get_domain_name(__lowerCamelCase )
    # Initialize the parser
    _UpperCAmelCase : Optional[Any] =Parser(__lowerCamelCase )
    try:
        # Open URL
        _UpperCAmelCase : Any =requests.get(__lowerCamelCase )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        _UpperCAmelCase : Dict =set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                _UpperCAmelCase : Dict =requests.get(__lowerCamelCase )
                # Get the valid email.
                _UpperCAmelCase : List[str] =re.findall('[a-zA-Z0-9]+@' + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(__lowerCamelCase )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(__lowerCamelCase )


if __name__ == "__main__":
    # NOTE(review): `emails_from_url` is not defined under that name in this
    # file (the function above is obfuscated to `lowerCamelCase__`).
    lowercase =emails_from_url('https://github.com')
    print(F"""{len(emails)} emails found:""")
    print('\n'.join(sorted(emails)))
| 331
| 1
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
# NOTE(review): obfuscation broke this dataclass -- every field shares the
# single name `_snake_case` (so only the last one survives), `default=lowercase`
# references an unrelated module-level string (presumably the original was
# `default=None`), and `", ".join(lowercase)` presumably joined the
# MODEL_TYPES tuple. The help strings document the intended fields:
# model_name_or_path, model_type, config_overrides, config_name,
# tokenizer_name, cache_dir, use_fast_tokenizer, model_revision,
# use_auth_token.
@dataclass
class lowerCAmelCase_ :
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    _snake_case : Optional[str] = field(
        default=lowercase , metadata={
            """help""": (
                """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
            )
        } , )
    _snake_case : Optional[str] = field(
        default=lowercase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(lowercase )} , )
    _snake_case : Optional[str] = field(
        default=lowercase , metadata={
            """help""": (
                """Override some existing default config settings when a model is trained from scratch. Example: """
                """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
            )
        } , )
    _snake_case : Optional[str] = field(
        default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    _snake_case : Optional[str] = field(
        default=lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    _snake_case : Optional[str] = field(
        default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    _snake_case : bool = field(
        default=lowercase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    _snake_case : str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    _snake_case : bool = field(
        default=lowercase , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )

    def __a ( self :Tuple ):
        # __post_init__-style guard: --config_overrides is mutually exclusive
        # with --config_name / --model_name_or_path.
        # NOTE(review): the attributes read here do not exist because the
        # fields above were renamed by obfuscation.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                """--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
# NOTE(review): same obfuscation damage as the class above -- all fields share
# the name `_snake_case` and `default=lowercase` presumably stood for
# `default=None`/`default=False`. Intended fields per the help strings:
# dataset_name, dataset_config_name, train_file, validation_file,
# train_ref_file, validation_ref_file, overwrite_cache,
# validation_split_percentage, max_seq_length, preprocessing_num_workers,
# mlm_probability, pad_to_max_length.
@dataclass
class lowerCAmelCase_ :
    """Arguments pertaining to what data the model is trained and evaluated on."""

    _snake_case : Optional[str] = field(
        default=lowercase , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
    _snake_case : Optional[str] = field(
        default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    _snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """The input training data file (a text file)."""} )
    _snake_case : Optional[str] = field(
        default=lowercase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    _snake_case : Optional[str] = field(
        default=lowercase , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
    _snake_case : Optional[str] = field(
        default=lowercase , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
    _snake_case : bool = field(
        default=lowercase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    _snake_case : Optional[int] = field(
        default=5 , metadata={
            """help""": """The percentage of the train set used as validation set in case there's no validation split"""
        } , )
    _snake_case : Optional[int] = field(
        default=lowercase , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated. Default to the max input length of the model."""
            )
        } , )
    _snake_case : Optional[int] = field(
        default=lowercase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
    _snake_case : float = field(
        default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
    _snake_case : bool = field(
        default=lowercase , metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        } , )

    def __a ( self :Dict ):
        # __post_init__-style guard: train/validation files must be csv/json/txt.
        # NOTE(review): `self.train_file`/`self.validation_file` do not exist
        # under those names because the fields above were renamed; also note
        # `assert` is stripped under `python -O`.
        if self.train_file is not None:
            UpperCamelCase__ :Optional[Any] = self.train_file.split(""".""" )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            UpperCamelCase__ :Optional[int] = self.validation_file.split(""".""" )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def A ( lowercase__ , ref_file : str ):
    """Attach whole-word-masking reference data as a ``chinese_ref`` column.

    FIX: the original declared both parameters under the same name
    `lowercase__` (a SyntaxError) and bound every intermediate to the same
    name; the second parameter is restored as the reference-file path.

    Args:
        lowercase__: a `datasets.Dataset` whose rows align 1:1 with *ref_file*.
        ref_file: path to a file with one JSON reference object per line.

    Returns:
        A new `datasets.Dataset` with the original columns plus ``chinese_ref``.
    """
    with open(ref_file , """r""" , encoding="""utf-8""" ) as f:
        # One JSON object per non-empty, non-whitespace line.
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(lowercase__ ) == len(refs )
    # Rebuild the dataset column-wise and add the reference column.
    dataset_dict = {c: lowercase__[c] for c in lowercase__.column_names}
    dataset_dict["""chinese_ref"""] = refs
    return Dataset.from_dict(dataset_dict )
def A ( ) -> Dict:
    """Run masked-language-model training with whole-word masking (run_mlm_wwm).

    NOTE(review): obfuscation broke this function -- nearly every local is
    bound to `UpperCamelCase__` but read back under its original name
    (`parser`, `model_args`, `data_args`, `training_args`, `last_checkpoint`,
    `datasets`, `config`, `tokenizer`, `model`, `trainer`, ...), so most
    statements raise NameError as written; `add_chinese_references` is the
    function obfuscated to `A` above. The structure and comments below
    describe the intended flow.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    UpperCamelCase__ :Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    UpperCamelCase__ :int = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        UpperCamelCase__ :Optional[Any] = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""" , lowercase__ )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        UpperCamelCase__ :List[str] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            # No validation split: carve one out of the train split by percentage.
            UpperCamelCase__ :Any = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=f"""train[:{data_args.validation_split_percentage}%]""" , )
            UpperCamelCase__ :Dict = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=f"""train[{data_args.validation_split_percentage}%:]""" , )
    else:
        # Local files: infer the loader ("text" for .txt) from the extension.
        UpperCamelCase__ :Union[str, Any] = {}
        if data_args.train_file is not None:
            UpperCamelCase__ :List[Any] = data_args.train_file
        if data_args.validation_file is not None:
            UpperCamelCase__ :str = data_args.validation_file
        UpperCamelCase__ :Tuple = data_args.train_file.split(""".""" )[-1]
        if extension == "txt":
            UpperCamelCase__ :List[str] = """text"""
        UpperCamelCase__ :Optional[int] = load_dataset(lowercase__ , data_files=lowercase__ )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCamelCase__ :Union[str, Any] = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        UpperCamelCase__ :List[str] = AutoConfig.from_pretrained(model_args.config_name , **lowercase__ )
    elif model_args.model_name_or_path:
        UpperCamelCase__ :Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
    else:
        # From-scratch training: instantiate a fresh config for the model type.
        UpperCamelCase__ :Union[str, Any] = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
        if model_args.config_overrides is not None:
            logger.info(f"""Overriding config: {model_args.config_overrides}""" )
            config.update_from_string(model_args.config_overrides )
            logger.info(f"""New config: {config}""" )
    UpperCamelCase__ :Union[str, Any] = {
        """cache_dir""": model_args.cache_dir,
        """use_fast""": model_args.use_fast_tokenizer,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        UpperCamelCase__ :Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowercase__ )
    elif model_args.model_name_or_path:
        UpperCamelCase__ :Any = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowercase__ )
    else:
        raise ValueError(
            """You are instantiating a new tokenizer from scratch. This is not supported by this script."""
            """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
    if model_args.model_name_or_path:
        UpperCamelCase__ :Tuple = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        UpperCamelCase__ :Optional[Any] = AutoModelForMaskedLM.from_config(lowercase__ )
    model.resize_token_embeddings(len(lowercase__ ) )
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        UpperCamelCase__ :Dict = datasets["""train"""].column_names
    else:
        UpperCamelCase__ :str = datasets["""validation"""].column_names
    UpperCamelCase__ :Optional[int] = """text""" if """text""" in column_names else column_names[0]
    UpperCamelCase__ :str = """max_length""" if data_args.pad_to_max_length else False

    def tokenize_function(lowercase__ : str ):
        # Remove empty lines
        UpperCamelCase__ :List[str] = [line for line in examples["""text"""] if len(lowercase__ ) > 0 and not line.isspace()]
        return tokenizer(examples["""text"""] , padding=lowercase__ , truncation=lowercase__ , max_length=data_args.max_seq_length )

    UpperCamelCase__ :int = datasets.map(
        lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        UpperCamelCase__ :Tuple = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        UpperCamelCase__ :Tuple = add_chinese_references(
            tokenized_datasets["""validation"""] , data_args.validation_ref_file )
    # If we have ref files, need to avoid it removed by trainer
    UpperCamelCase__ :Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        UpperCamelCase__ :List[str] = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    UpperCamelCase__ :str = DataCollatorForWholeWordMask(tokenizer=lowercase__ , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    UpperCamelCase__ :Union[str, Any] = Trainer(
        model=lowercase__ , args=lowercase__ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            UpperCamelCase__ :List[Any] = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            UpperCamelCase__ :int = model_args.model_name_or_path
        else:
            UpperCamelCase__ :Optional[Any] = None
        UpperCamelCase__ :List[Any] = trainer.train(resume_from_checkpoint=lowercase__ )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        UpperCamelCase__ :int = os.path.join(training_args.output_dir , """train_results.txt""" )
        if trainer.is_world_process_zero():
            with open(lowercase__ , """w""" ) as writer:
                logger.info("""***** Train results *****""" )
                for key, value in sorted(train_result.metrics.items() ):
                    logger.info(f""" {key} = {value}""" )
                    writer.write(f"""{key} = {value}\n""" )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
    # Evaluation
    UpperCamelCase__ :Optional[Any] = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        UpperCamelCase__ :str = trainer.evaluate()
        UpperCamelCase__ :Dict = math.exp(eval_output["""eval_loss"""] )
        UpperCamelCase__ :int = perplexity
        UpperCamelCase__ :Union[str, Any] = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
        if trainer.is_world_process_zero():
            with open(lowercase__ , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in sorted(results.items() ):
                    logger.info(f""" {key} = {value}""" )
                    writer.write(f"""{key} = {value}\n""" )
    return results
def A ( lowercase__ : Tuple ) -> Tuple:
    """Entry point for xla_spawn (TPUs); *lowercase__* is the process index.

    NOTE(review): `main` is not defined in this file (the training entry point
    above is obfuscated to `A`), so this call raises NameError as written.
    """
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    # NOTE(review): same undefined-`main` problem as above.
    main()
| 45
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 332
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
# Emit INFO-level messages from the transformers logging framework.
logging.set_verbosity_info()
# Module-level logger for this conversion script.
_lowercase = logging.get_logger(__name__)
def A (__lowerCamelCase :str ):
    """Build a :class:`DPTConfig` matching the original checkpoint named by the URL.

    Args:
        __lowerCamelCase: URL of the original DPT checkpoint.

    Returns:
        ``(config, expected_shape)`` — the model configuration and the output
        shape used later as a sanity check.
    """
    checkpoint_url = __lowerCamelCase
    config = DPTConfig()
    # Default output shape (fix: the original left expected_shape undefined on
    # the non-"large", non-"ade" path).
    expected_shape = (1, 384, 384)
    if "large" in checkpoint_url:
        # ViT-large backbone hyper-parameters.
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        # Semantic-segmentation head fine-tuned on ADE20k.
        # NOTE(review): attribute name restored by convention — confirm against
        # the upstream conversion script (may be `use_batch_norm`).
        config.use_batch_norm_in_fusion_residuals = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        # fix: cast the *keys* to int (the original cast the function argument)
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def A (__lowerCamelCase :Any ):
_lowerCAmelCase = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def A (__lowerCamelCase :int ):
    """Translate one key of the original DPT checkpoint into the name used by
    the Hugging Face implementation.

    Fix: the original assigned every replacement to a mangled throw-away local
    while reading an undefined ``name``; each rule now chains on ``name`` so
    later rules see earlier replacements.
    """
    name = __lowerCamelCase
    # --- backbone (ViT) keys ---
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    # --- neck / fusion keys ---
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # --- readout blocks ---
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # --- resize blocks ---
    # NOTE(review): the original had no rule for pretrained.act_postprocess3.4
    # (layers.2.resize) — confirm against the upstream conversion script.
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def A (state_dict, config):
    """Split each fused timm ``qkv`` projection into separate query/key/value
    tensors and store them under the HF key names, mutating *state_dict*.

    Fix: the original declared two parameters with the same mangled name
    (a SyntaxError) and discarded every slice instead of writing it back.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def A ():
    """Download and return the standard COCO test image (two cats on a couch)
    used across HF conversion scripts for output sanity checks.

    Fix: the original stored the URL in a mangled local and returned an
    undefined ``im``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def A (checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Convert an original DPT checkpoint to the HF format, verify its output
    on a test image, save it locally and optionally push it to the hub.

    Fix: the original declared four parameters with the same mangled name
    (a SyntaxError) and lost every intermediate result.

    NOTE(review): the helper functions referenced below (get_dpt_config,
    remove_ignore_keys_, rename_key, read_in_q_k_v, prepare_img) are all
    currently named ``A`` in this mangled file — their real names need to be
    restored for these calls to resolve.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # Fix: the parser was assigned to a mangled name while the add_argument
    # calls read an undefined `parser`; same for the parsed `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    # NOTE(review): the conversion entry point is currently named `A` in this
    # mangled file; it should be restored to `convert_dpt_checkpoint`.
    A(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 716
|
'''simple docstring'''
from __future__ import annotations
def A (__lowerCamelCase :list[int] ):
    """Sort a list of integers in place with pigeonhole sort and return it.

    Runs in O(n + range) time and O(range) extra space, where range is
    ``max - min + 1``. Handles the empty list and negative values.
    """
    array = __lowerCamelCase
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read a comma-separated list of integers from stdin and print it sorted.
    # Fix: inputs were assigned to a mangled name while the calls below read
    # undefined `user_input` / `unsorted`; the sort function in this file is
    # currently named `A` (mangled from `pigeon_sort`).
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(A(unsorted))
| 162
| 0
|
import datasets
lowerCamelCase ="\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
lowerCamelCase ="\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
lowerCamelCase ="\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"
def SCREAMING_SNAKE_CASE_ (preds, labels):
    """Fraction of *preds* equal to *labels* (simple accuracy).

    Fix: the original declared both parameters with the same mangled name,
    which is a SyntaxError. Expects array-like inputs supporting elementwise
    ``==`` and ``.mean()`` (e.g. NumPy arrays).
    """
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCamelCase ( datasets.Metric ):
    """XNLI metric: plain accuracy of predicted vs. reference labels."""

    def __SCREAMING_SNAKE_CASE ( self ):
        """Describe the metric's inputs, citation and output format."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # sts-b is a regression config, hence float labels there
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def __SCREAMING_SNAKE_CASE ( self, predictions, references ):
        """Return ``{"accuracy": ...}`` for *predictions* vs *references*.

        Fix: the original declared both parameters with the same mangled name
        (a SyntaxError); the accuracy helper in this file is currently named
        ``SCREAMING_SNAKE_CASE_`` (mangled from ``simple_accuracy``).
        """
        return {"accuracy": SCREAMING_SNAKE_CASE_(predictions, references)}
| 285
|
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
    """Tests for ``BertJapaneseTokenizer`` covering the MeCab, Sudachi and
    Jumanpp word tokenizers and the wordpiece sub-word tokenizer.

    NOTE(review): this block is heavily name-mangled — every local is assigned
    to ``__SCREAMING_SNAKE_CASE`` (each assignment shadowing the previous one)
    while reads reference other identifiers (``tokenizer``, ``vocab_tokens``,
    ``lowerCAmelCase__`` …), so most methods would raise ``NameError`` if run,
    and every method shares the name ``snake_case_`` so only the last one is
    bound. ``__a`` in the base list is presumably the mangled
    ``TokenizerTesterMixin`` — confirm against the upstream test module.
    """

    # NOTE(review): three distinct mixin attributes were all mangled to
    # ``__lowercase`` (only the last assignment survives) — presumably
    # tokenizer_class / test_rust_tokenizer-style flags upstream; TODO confirm.
    __lowercase : Dict = BertJapaneseTokenizer
    __lowercase : List[str] = False
    __lowercase : List[Any] = True

    def snake_case_ ( self):
        # Writes a tiny wordpiece vocabulary file into the test tmp dir.
        super().setUp()
        __SCREAMING_SNAKE_CASE = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """こんにちは""",
            """こん""",
            """にちは""",
            """ばんは""",
            """##こん""",
            """##にちは""",
            """##ばんは""",
            """世界""",
            """##世界""",
            """、""",
            """##、""",
            """。""",
            """##。""",
        ]
        __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))

    def snake_case_ ( self , lowerCAmelCase__):
        # Fixed input/expected-output pair used by round-trip helpers.
        __SCREAMING_SNAKE_CASE = """こんにちは、世界。 \nこんばんは、世界。"""
        __SCREAMING_SNAKE_CASE = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
        return input_text, output_text

    def snake_case_ ( self , lowerCAmelCase__):
        # Encode/decode round-trip helper.
        __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.get_input_output_texts(lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = tokenizer.decode(lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__)
        return text, ids

    def snake_case_ ( self):
        pass # TODO add if relevant

    def snake_case_ ( self):
        pass # TODO add if relevant

    def snake_case_ ( self):
        pass # TODO add if relevant

    def snake_case_ ( self):
        # Default word tokenizer + wordpiece over the tiny vocab.
        __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file)
        __SCREAMING_SNAKE_CASE = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""")
        self.assertListEqual(lowerCAmelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])

    def snake_case_ ( self):
        # MeCab-backed tokenizer must survive a pickle round trip unchanged.
        __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""")
        self.assertIsNotNone(lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = """こんにちは、世界。\nこんばんは、世界。"""
        __SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
        __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , """tokenizer.bin""")
        with open(lowerCAmelCase__ , """wb""") as handle:
            pickle.dump(lowerCAmelCase__ , lowerCAmelCase__)
        with open(lowerCAmelCase__ , """rb""") as handle:
            __SCREAMING_SNAKE_CASE = pickle.load(lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)

    def snake_case_ ( self):
        # MeCab with the ipadic dictionary.
        __SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="""ipadic""")
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def snake_case_ ( self):
        # MeCab with unidic_lite; skipped when the package is absent.
        try:
            __SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="""unidic_lite""")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def snake_case_ ( self):
        # MeCab with unidic; skipped when the package is absent.
        try:
            __SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="""unidic""")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def snake_case_ ( self):
        # Lower-casing is applied to latin text only.
        __SCREAMING_SNAKE_CASE = MecabTokenizer(do_lower_case=lowerCAmelCase__ , mecab_dic="""ipadic""")
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    def snake_case_ ( self):
        # Custom MeCab option pointing at the juman dictionary.
        try:
            __SCREAMING_SNAKE_CASE = MecabTokenizer(
                do_lower_case=lowerCAmelCase__ , normalize_text=lowerCAmelCase__ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""")
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    def snake_case_ ( self):
        # With normalize_text disabled, the ideographic space is preserved.
        __SCREAMING_SNAKE_CASE = MecabTokenizer(normalize_text=lowerCAmelCase__ , mecab_dic="""ipadic""")
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )

    @require_sudachi
    def snake_case_ ( self):
        # Sudachi-backed tokenizer must survive a pickle round trip unchanged.
        __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""")
        self.assertIsNotNone(lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = """こんにちは、世界。\nこんばんは、世界。"""
        __SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
        __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , """tokenizer.bin""")
        with open(lowerCAmelCase__ , """wb""") as handle:
            pickle.dump(lowerCAmelCase__ , lowerCAmelCase__)
        with open(lowerCAmelCase__ , """rb""") as handle:
            __SCREAMING_SNAKE_CASE = pickle.load(lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)

    @require_sudachi
    def snake_case_ ( self):
        # Sudachi keeps whitespace tokens by default.
        __SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""")
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )

    @require_sudachi
    def snake_case_ ( self):
        # Split mode A: shortest units.
        __SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""")
        self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国""", """人""", """参政""", """権"""])

    @require_sudachi
    def snake_case_ ( self):
        # Split mode B: intermediate units.
        __SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""")
        self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人""", """参政権"""])

    @require_sudachi
    def snake_case_ ( self):
        # Split mode C: longest units.
        __SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""")
        self.assertListEqual(tokenizer.tokenize("""外国人参政権""") , ["""外国人参政権"""])

    @require_sudachi
    def snake_case_ ( self):
        __SCREAMING_SNAKE_CASE = SudachiTokenizer(do_lower_case=lowerCAmelCase__ , sudachi_dict_type="""core""")
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )

    @require_sudachi
    def snake_case_ ( self):
        __SCREAMING_SNAKE_CASE = SudachiTokenizer(normalize_text=lowerCAmelCase__ , sudachi_dict_type="""core""")
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )

    @require_sudachi
    def snake_case_ ( self):
        # trim_whitespace drops the whitespace tokens.
        __SCREAMING_SNAKE_CASE = SudachiTokenizer(trim_whitespace=lowerCAmelCase__ , sudachi_dict_type="""core""")
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )

    @require_jumanpp
    def snake_case_ ( self):
        # Jumanpp-backed tokenizer must survive a pickle round trip unchanged.
        __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""")
        self.assertIsNotNone(lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = """こんにちは、世界。\nこんばんは、世界。"""
        __SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
        __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , """tokenizer.bin""")
        with open(lowerCAmelCase__ , """wb""") as handle:
            pickle.dump(lowerCAmelCase__ , lowerCAmelCase__)
        with open(lowerCAmelCase__ , """rb""") as handle:
            __SCREAMING_SNAKE_CASE = pickle.load(lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)

    @require_jumanpp
    def snake_case_ ( self):
        __SCREAMING_SNAKE_CASE = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    @require_jumanpp
    def snake_case_ ( self):
        __SCREAMING_SNAKE_CASE = JumanppTokenizer(do_lower_case=lowerCAmelCase__)
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    @require_jumanpp
    def snake_case_ ( self):
        # Without normalization, half-width katakana stays decomposed.
        __SCREAMING_SNAKE_CASE = JumanppTokenizer(normalize_text=lowerCAmelCase__)
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )

    @require_jumanpp
    def snake_case_ ( self):
        __SCREAMING_SNAKE_CASE = JumanppTokenizer(trim_whitespace=lowerCAmelCase__)
        self.assertListEqual(
            tokenizer.tokenize(""" \tアップルストアでiPhone8 が  \n 発売された 。  """) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )

    @require_jumanpp
    def snake_case_ ( self):
        # Emoticons are kept as a single token.
        __SCREAMING_SNAKE_CASE = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""") , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )

    def snake_case_ ( self):
        # WordpieceTokenizer in isolation over a hand-built vocab.
        __SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
        __SCREAMING_SNAKE_CASE = {}
        for i, token in enumerate(lowerCAmelCase__):
            __SCREAMING_SNAKE_CASE = i
        __SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=lowerCAmelCase__ , unk_token="""[UNK]""")
        self.assertListEqual(tokenizer.tokenize("""""") , [])
        self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こんにちは"""])
        self.assertListEqual(tokenizer.tokenize("""こんばんは""") , ["""こん""", """##ばんは"""])
        self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""") , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""])

    def snake_case_ ( self):
        # Sentencepiece sub-word tokenizer exposed by the auto-jumanpp model.
        __SCREAMING_SNAKE_CASE = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""")
        __SCREAMING_SNAKE_CASE = tokenizer.subword_tokenizer
        __SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""")
        self.assertListEqual(lowerCAmelCase__ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""])
        __SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""")
        self.assertListEqual(lowerCAmelCase__ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""])

    def snake_case_ ( self):
        # [CLS]/[SEP] insertion for single sentences and pairs.
        __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""")
        __SCREAMING_SNAKE_CASE = tokenizer.encode("""ありがとう。""" , add_special_tokens=lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = tokenizer.encode("""どういたしまして。""" , add_special_tokens=lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
    """Tests for ``BertJapaneseTokenizer`` with the character-level sub-word
    tokenizer.

    NOTE(review): same mangling as the class above — locals are assigned to
    ``__SCREAMING_SNAKE_CASE`` while reads reference undefined names, and all
    methods share the name ``snake_case_``; needs restoration from upstream.
    ``__a`` is presumably the mangled ``TokenizerTesterMixin``.
    """

    # Mangled mixin attributes (only the last assignment survives).
    __lowercase : str = BertJapaneseTokenizer
    __lowercase : int = False

    def snake_case_ ( self):
        # Writes a tiny character-level vocabulary into the test tmp dir.
        super().setUp()
        __SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
        __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))

    def snake_case_ ( self , **lowerCAmelCase__):
        # Factory for a character-mode tokenizer bound to the tmp vocab.
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **lowerCAmelCase__)

    def snake_case_ ( self , lowerCAmelCase__):
        # Fixed input/expected-output pair (character-split expectation).
        __SCREAMING_SNAKE_CASE = """こんにちは、世界。 \nこんばんは、世界。"""
        __SCREAMING_SNAKE_CASE = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
        return input_text, output_text

    def snake_case_ ( self):
        pass # TODO add if relevant

    def snake_case_ ( self):
        pass # TODO add if relevant

    def snake_case_ ( self):
        pass # TODO add if relevant

    def snake_case_ ( self):
        # End-to-end character tokenization over the tiny vocab.
        __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""")
        __SCREAMING_SNAKE_CASE = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""")
        self.assertListEqual(
            lowerCAmelCase__ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2])

    def snake_case_ ( self):
        # CharacterTokenizer in isolation; unknown chars map to [UNK].
        __SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
        __SCREAMING_SNAKE_CASE = {}
        for i, token in enumerate(lowerCAmelCase__):
            __SCREAMING_SNAKE_CASE = i
        __SCREAMING_SNAKE_CASE = CharacterTokenizer(vocab=lowerCAmelCase__ , unk_token="""[UNK]""")
        self.assertListEqual(tokenizer.tokenize("""""") , [])
        self.assertListEqual(tokenizer.tokenize("""こんにちは""") , ["""こ""", """ん""", """に""", """ち""", """は"""])
        self.assertListEqual(tokenizer.tokenize("""こんにちほ""") , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""])

    def snake_case_ ( self):
        # [CLS]/[SEP] insertion for single sentences and pairs.
        __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""")
        __SCREAMING_SNAKE_CASE = tokenizer.encode("""ありがとう。""" , add_special_tokens=lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = tokenizer.encode("""どういたしまして。""" , add_special_tokens=lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Checks that AutoTokenizer resolves the Japanese checkpoint to
    ``BertJapaneseTokenizer``."""

    def snake_case_ ( self):
        # Fix: the original assigned the model name to a mangled local and
        # read undefined `lowerCAmelCase__` in both call and assertion.
        model_name = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        # NOTE(review): expected class restored from the upstream test — confirm.
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Checks that loading a checkpoint with a mismatched tokenizer class
    emits the expected warning (in both directions)."""

    def snake_case_ ( self):
        # Fix: the original stored the checkpoint names in mangled locals and
        # passed undefined `lowerCAmelCase__` to from_pretrained.
        model_name = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers" , level="WARNING") as cm:
            BertTokenizer.from_pretrained(model_name)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."))
        model_name = "bert-base-cased"
        with self.assertLogs("transformers" , level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(model_name)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."))
| 155
| 0
|
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return n-choose-k: the number of ways to pick k items from n.

    NOTE(review): the obfuscated original declared two parameters with the same
    name (a SyntaxError) while the body read `n` and `k`, and every call site in
    this file invokes `combinations(...)` — both restored accordingly.

    Raises:
        ValueError: if k > n or k < 0 (would require a negative factorial).
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("""Please enter positive integers for n and k where n >= k""" )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
    # Demonstrate n-choose-k on a few worked examples.
    card_hands = combinations(52, 5)
    print(
        'The number of five-card hands possible from a standard',
        f'fifty-two card deck is: {card_hands}\n',
    )

    group_ways = combinations(40, 4)
    print(
        'If a class of 40 students must be arranged into groups of',
        f'4 for group projects, there are {group_ways} ways',
        'to arrange them.\n',
    )

    podium_ways = combinations(10, 3)
    print(
        'If 10 teams are competing in a Formula One race, there',
        f'are {podium_ways} ways that first, second and',
        'third place can be awarded.',
    )
| 702
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple: Tuple[str, ...], flax_tensor: torch.Tensor) -> Tuple[Tuple[str, ...], torch.Tensor]:
    """Rename a flattened Flax parameter key and reorder its tensor for PyTorch.

    NOTE(review): the obfuscated original declared two parameters with the same
    name (a SyntaxError); the body and the call site (`rename_base_flax_keys`
    further down the file) fix both the parameter names and the function name.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer: swap the last two axes to the PyTorch (out, in) layout
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer (the join is truthy for any non-empty key — kept as-is)
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened checkpoint key into (real layer name, tail key, content).

    `layer` keys containing "metadata"/"kvstore" are grouped under the layer
    prefix; "kvstore/path" entries are resolved against the checkpoint path and
    "kvstore/driver" entries are forced to the local "file" driver.

    NOTE(review): parameter names restored from body usage; the obfuscated
    original reused one name for all three (a SyntaxError).
    """
    if "metadata" in layer:
        split_layer = layer.split("""metadata""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("""kvstore""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
    else:
        split_layer = layer.split("""/""" )
        curr_real_layer_name = """/""".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = """file"""
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    """Apply the shared `rename_keys` mapping to a state-dict block and torch.save it.

    NOTE(review): the obfuscated original reused one name for both parameters
    (a SyntaxError) and collapsed all locals into one temp; any per-key
    transformation that once happened in the copy loop was lost, so a plain
    copy is the conservative reading — confirm against the upstream script.
    """
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """Read a T5X/Flax Switch checkpoint layer by layer and write PyTorch shards.

    Weights are accumulated into `current_block` until adding one more would
    exceed `max_shard_size`, at which point the block is renamed and saved.
    Returns (metadata, index) — index is None when a single shard suffices.

    NOTE(review): the obfuscated original reused one parameter name five times
    (a SyntaxError) and collapsed every local into one temp; names restored
    from their later usages in this body — confirm against the upstream script.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
        checkpoint_info = flatten_dict(checkpoint_info , sep="""/""" )

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("""/""" ) ) , raw_weights )
        key = """/""".join(key )

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path , weights_name.replace(""".bin""" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
            rename_and_save_block(current_block , save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch , dtype ) )
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(""".bin""" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
    rename_and_save_block(current_block , save_path )
    sharded_state_dicts.append(current_block.keys() )

    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            """.bin""" , f"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"""total_size""": total_size}
    index = {"""metadata""": metadata, """weight_map""": weight_map}

    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , """w""" , encoding="""utf-8""" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + """\n"""
        f.write(content )

    return metadata, index
if __name__ == "__main__":
    # Command-line driver: convert a T5X Switch checkpoint into sharded PyTorch weights.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    args = parser.parse_args()
    # Fixed: the original read `args.switch_tax_checkpoint_path`, which argparse
    # never defines — the dest derived from `--switch_t5x_checkpoint_path` is
    # `switch_t5x_checkpoint_path`.
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def UpperCamelCase () -> None:
    """Smoke-test a converted SwitchTransformers checkpoint end to end (prints a generation).

    NOTE(review): fixed `TaTokenizer` (no such class) to `T5Tokenizer`, the
    tokenizer matching the "t5-small" checkpoint loaded below, and corrected
    the return annotation — the function returns nothing.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
    config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
    tokenizer = T5Tokenizer.from_pretrained("""t5-small""" )
    text = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
    input_ids = tokenizer(text , return_tensors="""pt""" ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 64
| 0
|
'''simple docstring'''
import random
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase , lowercase , lowercase = [], [], []
for element in data:
if element < pivot:
less.append(lowerCAmelCase__ )
elif element > pivot:
greater.append(lowerCAmelCase__ )
else:
equal.append(lowerCAmelCase__ )
return less, equal, greater
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if index >= len(lowerCAmelCase__ ) or index < 0:
return None
lowercase = items[random.randint(0 , len(lowerCAmelCase__ ) - 1 )]
lowercase = 0
lowercase , lowercase , lowercase = _partition(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = len(lowerCAmelCase__ )
lowercase = len(lowerCAmelCase__ )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(lowerCAmelCase__ , lowerCAmelCase__ )
# must be in larger
else:
return quick_select(lowerCAmelCase__ , index - (m + count) )
| 310
|
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
# Example scripts that must be skipped when diffing `examples/` against the
# `by_feature/` variants. NOTE(review): the obfuscated original bound this list
# to a throwaway name, but the comparison loop in the test class below reads
# `EXCLUDE_EXAMPLES` — that is the required name.
EXCLUDE_EXAMPLES = [
    """cross_validation.py""",
    """gradient_accumulation.py""",
    """local_sgd.py""",
    """multi_process_metrics.py""",
    """memory.py""",
    """automatic_gradient_accumulation.py""",
    """fsdp_with_peak_mem_tracking.py""",
    """deepspeed_with_config_support.py""",
    """megatron_lm_gpt_pretraining.py""",
]
class _A ( unittest.TestCase ):
    """Diff each `examples/by_feature` script against its complete counterpart.

    NOTE(review): the obfuscated original gave all four helper parameters one
    name (a SyntaxError) and all three methods one name (silent shadowing);
    names below are restored from body usage and call sites — confirm.
    """

    def one_complete_example(self , complete_file_name , parser_only , secondary_filename=None , special_strings=None ):
        """Diff every eligible script under `examples/` against *complete_file_name*."""
        # Show full diffs on failure. NOTE(review): the original assigned None to
        # a clobbered local; self.maxDiff is the conventional target — confirm.
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
        examples_path = os.path.abspath("""examples""" )
        for item in os.listdir(examples_path ):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(examples_path , item )
                if os.path.isfile(item_path ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name , feature_script=item , tested_section="""main()""" if parser_only else """training_function()""" , ):
                        # NOTE(review): argument order was destroyed by obfuscation;
                        # restored per compare_against_test's expected (base, tested,
                        # parser_only, secondary) shape — confirm.
                        diff = compare_against_test(
                            os.path.join(examples_path , item ) , complete_file_name , parser_only , secondary_filename )
                        diff = """\n""".join(diff )
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string , """""" )
                        self.assertEqual(diff , """""" )

    def test_nlp_examples(self ):
        # NOTE(review): the True/False flags were lost in obfuscation; testing
        # both the parser and the training loop is the evident intent.
        self.one_complete_example("""complete_nlp_example.py""" , True )
        self.one_complete_example("""complete_nlp_example.py""" , False )

    def test_cv_examples(self ):
        cv_path = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
        special_strings = [
            """ """ * 16 + """{\n\n""",
            """ """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
            """ """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
            """ """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
            """ """ * 20 + """\"epoch\": epoch,\n\n""",
            """ """ * 16 + """},\n\n""",
            """ """ * 16 + """step=epoch,\n""",
            """ """ * 12,
            """ """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
        ]
        self.one_complete_example("""complete_cv_example.py""" , True , cv_path , special_strings )
        self.one_complete_example("""complete_cv_example.py""" , False , cv_path , special_strings )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _A ( TempDirTestCase ):
    """Run each `examples/by_feature` script end to end via `accelerate launch`.

    NOTE(review): the obfuscated original inherited from an undefined name — the
    file imports TempDirTestCase, the evident base — and every method shared one
    name (silent shadowing); names restored from their bodies. The class
    attribute name was also destroyed; `clean_on_exit` is the assumed
    TempDirTestCase knob — confirm.
    """

    # Keep the temp dir across tests: checkpoint dirs written by one test are
    # consumed by the resume tests below.
    clean_on_exit = False

    @classmethod
    def setUpClass(cls ):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir , """default_config.yml""" )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ["""accelerate""", """launch""", """--config_file""", cls.configPath]

    @classmethod
    def tearDownClass(cls ):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )

    def test_checkpointing_by_epoch(self ):
        testargs = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )

    def test_checkpointing_by_steps(self ):
        testargs = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
        _ = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )

    def test_load_states_by_epoch(self ):
        testargs = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
        # return_stdout=True: the output is string-matched below.
        output = run_command(self._launch_args + testargs , return_stdout=True )
        self.assertNotIn("""epoch 0:""" , output )
        self.assertIn("""epoch 1:""" , output )

    def test_load_states_by_steps(self ):
        testargs = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            # With several processes, epoch 0 is skipped after resuming mid-epoch.
            self.assertNotIn("""epoch 0:""" , output )
            self.assertIn("""epoch 1:""" , output )
        else:
            self.assertIn("""epoch 0:""" , output )
            self.assertIn("""epoch 1:""" , output )

    @slow
    def test_cross_validation(self ):
        testargs = """
    examples/by_feature/cross_validation.py
    --num_folds 2
    """.split()
        with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
            output = run_command(self._launch_args + testargs , return_stdout=True )
            results = re.findall("""({.+})""" , output )
            results = [r for r in results if """accuracy""" in r][-1]
            results = ast.literal_eval(results )
            self.assertGreaterEqual(results["""accuracy"""] , 0.7_5 )

    def test_multi_process_metrics(self ):
        testargs = ["""examples/by_feature/multi_process_metrics.py"""]
        run_command(self._launch_args + testargs )

    @require_trackers
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_tracking(self ):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(tmpdir , """tracking""" ) ) )

    def test_gradient_accumulation(self ):
        testargs = ["""examples/by_feature/gradient_accumulation.py"""]
        run_command(self._launch_args + testargs )

    def test_local_sgd(self ):
        testargs = ["""examples/by_feature/local_sgd.py"""]
        run_command(self._launch_args + testargs )
| 359
| 0
|
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger (transformers' logging wrapper, imported at the top of the file);
# the tokenizer class below reads it as `logger`.
logger = logging.get_logger(__name__)
# File-name and URL tables for the Speech2Text2 tokenizer. NOTE(review): the
# obfuscated original bound all of these to one shadowed name; the class body
# below reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP / BPE_TOKEN_MERGES,
# fixing the names. BPE_TOKEN_VOCAB's name follows the same scheme — confirm.
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'tokenizer_config_file': 'tokenizer_config.json',
    'merges_file': 'merges.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
        ),
    },
    'tokenizer_config_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
        ),
    },
    'merges_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
        ),
    },
}

# End-of-word marker appended to the last symbol before BPE.
BPE_TOKEN_MERGES = '</w>'
# Continuation marker joining BPE sub-tokens ("@@ ").
BPE_TOKEN_VOCAB = '@@ '
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (any sequence of symbols).

    NOTE(review): the body read `word`, which the obfuscated parameter never
    bound; the call site in the tokenizer's bpe() fixes the function name.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
# NOTE(review): name restored — the class attribute below reads
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4}
class UpperCamelCase__ ( PreTrainedTokenizer ):
    """Speech2Text2 BPE tokenizer (decoder side of the S2T-wav2vec2 models).

    Without a merges file the tokenizer can only decode; encoding requires BPE
    ranks built from `merges_file`.

    NOTE(review): the obfuscated original inherited from an undefined name (the
    file imports PreTrainedTokenizer, the evident base), reused one name for
    all __init__ parameters (a SyntaxError), and gave every method one shadowed
    name; method and attribute names below are restored per the
    PreTrainedTokenizer API — confirm.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__( self ,vocab_file ,bos_token="<s>" ,pad_token="<pad>" ,eos_token="</s>" ,unk_token="<unk>" ,do_lower_case=False ,merges_file=None ,**kwargs ,) -> None:
        """Load the vocab (and optionally the merges) files.

        NOTE(review): parameter order restored from the original defaults
        ("<s>", "<pad>", "</s>", "<unk>", False, None) — confirm.
        """
        super().__init__(
            unk_token=unk_token ,bos_token=bos_token ,eos_token=eos_token ,pad_token=pad_token ,do_lower_case=do_lower_case ,**kwargs ,)
        self.do_lower_case = do_lower_case
        with open(vocab_file ,encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file ,encoding="""utf-8""" ) as merges_handle:
                merges = merges_handle.read().split("""\n""" )[:-1]
            merges = [tuple(merge.split()[:2] ) for merge in merges]
            self.bpe_ranks = dict(zip(merges ,range(len(merges ) ) ) )
            self.cache = {}

    @property
    def vocab_size( self ) -> int:
        """Number of entries in the decoding table."""
        return len(self.decoder )

    def get_vocab( self ) -> Dict:
        """Return the full token->id mapping, including added tokens."""
        return dict(self.encoder ,**self.added_tokens_encoder )

    def bpe( self ,token ):
        """Apply byte-pair merges to one whitespace token; results are cached."""
        # Mark the end of the word so end-of-word merges rank differently.
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs ,key=lambda pair : self.bpe_ranks.get(pair ,float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first ,i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = '''\n''' + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES ,"""""" )
        word = word.replace(""" """ ,BPE_TOKEN_VOCAB )
        self.cache[token] = word
        return word

    def _tokenize( self ,text ):
        """Split *text* on whitespace and BPE-encode each token. Requires merges."""
        if self.bpe_ranks is None:
            raise ValueError(
                """This tokenizer was instantiated without a `merges.txt` file, so"""
                """ that it can only be used for decoding, not for encoding."""
                """Make sure to provide `merges.txt` file at instantiation to enable """
                """encoding.""" )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(""" """ ) ) )
        return split_tokens

    def _convert_token_to_id( self ,token ) -> int:
        """Map a token string to its id, falling back to the unk token's id."""
        return self.encoder.get(token ,self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self ,index ) -> str:
        """Map an id back to its token string, falling back to the unk token."""
        result = self.decoder.get(index ,self.unk_token )
        return result

    def convert_tokens_to_string( self ,tokens ) -> str:
        """Join BPE tokens back into a plain string."""
        string = ''' '''.join(tokens )
        # make sure @@ tokens are concatenated
        string = ''''''.join(string.split(BPE_TOKEN_VOCAB ) )
        return string

    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """Write vocab.json (and merges.txt, when present) under *save_directory*."""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merges_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file ,"""w""" ,encoding="""utf-8""" ) as f:
            # NOTE(review): sort_keys/ensure_ascii flags were obfuscated away;
            # sorted, non-ASCII-preserving output is the conventional choice — confirm.
            f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=True ,ensure_ascii=False ) + """\n""" )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file ,"""w""" ,encoding="""utf-8""" ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return (vocab_file, merges_file)
| 702
|
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    # CLI driver: convert an original Stable Diffusion checkpoint to diffusers format.
    # NOTE(review): the obfuscated original bound the parser/args/pipeline to one
    # clobbered name while the body read `parser`, `args`, and `pipe` — restored.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        """--original_config_file""",
        default=None,
        type=str,
        help="""The YAML config file corresponding to the original architecture.""",
    )
    parser.add_argument(
        """--num_in_channels""",
        default=None,
        type=int,
        help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
    )
    parser.add_argument(
        """--scheduler_type""",
        default="""pndm""",
        type=str,
        help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
    )
    parser.add_argument(
        """--pipeline_type""",
        default=None,
        type=str,
        help=(
            """The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
            """. If `None` pipeline will be automatically inferred."""
        ),
    )
    parser.add_argument(
        """--image_size""",
        default=None,
        type=int,
        help=(
            """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
            """ Base. Use 768 for Stable Diffusion v2."""
        ),
    )
    parser.add_argument(
        """--prediction_type""",
        default=None,
        type=str,
        help=(
            """The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
            """ Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
        ),
    )
    parser.add_argument(
        """--extract_ema""",
        action="""store_true""",
        help=(
            """Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
            """ or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
            """ higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
        ),
    )
    parser.add_argument(
        """--upcast_attention""",
        action="""store_true""",
        help=(
            """Whether the attention computation should always be upcasted. This is necessary when running stable"""
            """ diffusion 2.1."""
        ),
    )
    parser.add_argument(
        """--from_safetensors""",
        action="""store_true""",
        help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
    )
    parser.add_argument(
        """--to_safetensors""",
        action="""store_true""",
        help="""Whether to store pipeline in safetensors format or not.""",
    )
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    parser.add_argument(
        """--stable_unclip""",
        type=str,
        default=None,
        required=False,
        help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
    )
    parser.add_argument(
        """--stable_unclip_prior""",
        type=str,
        default=None,
        required=False,
        help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
    )
    parser.add_argument(
        """--clip_stats_path""",
        type=str,
        help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
        required=False,
    )
    parser.add_argument(
        """--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
    )
    parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
    parser.add_argument(
        """--vae_path""",
        type=str,
        default=None,
        required=False,
        help="""Set to a path, hub id to an already converted vae to not convert it again.""",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        # Fixed: `torch.floataa` does not exist; --half means float16.
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 116
| 0
|
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# NOTE(review): module-level flag; assigned but never read anywhere in the
# visible code — presumably an environment-style toggle consumed elsewhere.
# Confirm its downstream use before renaming or removing.
UpperCamelCase__ = 'true'
def get_basic_setup(accelerator ,num_samples=82 ,batch_size=16 ):
    """Build a regression model/dataloader pair prepared through *accelerator*.

    Returns (model, ddp_model, dataloader). NOTE(review): the obfuscated
    original reused one name for all three parameters (a SyntaxError); names
    restored from body usage and the call site in test_torch_metrics.
    """
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset ,batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model, dataloader = accelerator.prepare(ddp_model ,dataloader )
    return model, ddp_model, dataloader
def get_dataloader(accelerator ,use_longest=False ):
    """Build the tokenized GLUE/MRPC validation dataloader (batch size 16).

    NOTE(review): duplicate parameter names and several keyword values were
    destroyed by obfuscation; `truncation=True`, `max_length=None`, and
    `shuffle=False` are the conventional readings — confirm.
    """
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
    dataset = load_dataset('glue' ,'mrpc' ,split='validation' )

    def tokenize_function(examples ):
        outputs = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=True ,max_length=None )
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function ,batched=True ,remove_columns=['idx', 'sentence1', 'sentence2'] ,)
    tokenized_datasets = tokenized_datasets.rename_column('label' ,'labels' )

    def collate_fn(examples ):
        # Pad dynamically to the longest sample, or statically to 128 tokens.
        if use_longest:
            return tokenizer.pad(examples ,padding='longest' ,return_tensors='pt' )
        return tokenizer.pad(examples ,padding='max_length' ,max_length=128 ,return_tensors='pt' )

    return DataLoader(tokenized_datasets ,shuffle=False ,collate_fn=collate_fn ,batch_size=16 )
def get_mrpc_setup(dispatch_batches ,split_batches ):
    """Build the MRPC model/dataloader setup in both plain and accelerator-prepared form.

    Returns ({"ddp": [...], "no": [...]}, accelerator). NOTE(review): parameter
    names restored from body usage; `return_dict=True` was obfuscated — confirm.
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches ,split_batches=split_batches )
    dataloader = get_dataloader(accelerator ,not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased' ,return_dict=True )
    ddp_model, ddp_dataloader = accelerator.prepare(model ,dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model ,dataloader ,accelerator ):
    """Run *model* over *dataloader*, gathering (logits, targets) across processes.

    Returns concatenated (logits, targs) tensors. NOTE(review): the obfuscated
    original reused one name for all three parameters (a SyntaxError).
    """
    logits_and_targets = []
    for batch in dataloader:
        inputs, targets = batch.values()
        with torch.no_grad():
            logit = model(inputs )
        logit, target = accelerator.gather_for_metrics((logit, targets) )
        logits_and_targets.append((logit, target) )
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits, targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
def test_torch_metrics(accelerator ,num_samples=82 ,dispatch_batches=False ,split_batches=False ,batch_size=16 ):
    """Check that gather_for_metrics returns exactly *num_samples* predictions.

    NOTE(review): parameter names restored from the call sites in main()
    (`test_torch_metrics(accelerator, 99)` etc.); the original reused one name
    for all five parameters (a SyntaxError).
    """
    model, ddp_model, dataloader = get_basic_setup(accelerator ,num_samples ,batch_size )
    logits, targs = generate_predictions(ddp_model ,dataloader ,accelerator )
    assert (
        len(logits ) == num_samples
    ), F'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}'''
def test_mrpc(dispatch_batches = False ,split_batches = False ):
    """Compare GLUE/MRPC metrics computed on one process vs. gathered across processes.

    Asserts accuracy and F1 match between the baseline and distributed runs.
    NOTE(review): parameter names restored from body usage; the original reused
    one name for both (a SyntaxError).
    """
    metric = evaluate.load('glue' ,'mrpc' )
    setup, accelerator = get_mrpc_setup(dispatch_batches ,split_batches )
    # First do baseline
    model, dataloader, device = setup['no']
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds ,references=batch['labels'] )
    baseline = metric.compute()
    # Then do distributed
    ddp_model, ddp_dataloader, _ = setup['ddp']
    ddp_model.eval()
    for batch in ddp_dataloader:
        with torch.inference_mode():
            outputs = ddp_model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch['labels']
        preds, references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds ,references=references )
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] ,distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
    """Run the metric-gathering tests across all sampler configurations."""
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    # Keep non-main processes quiet.
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs): each spawned process receives its index and runs `main`.
    main()
# Script entry point (e.g. when launched via `accelerate launch`).
if __name__ == "__main__":
    main()
| 110
|
"""simple docstring"""
# Maps a spelled-out metric length unit to its SI symbol; read by the
# conversion function below via `UNIT_SYMBOL.get(...)`.
UNIT_SYMBOL = {
    'meter': 'm',
    'kilometer': 'km',
    'megametre': 'Mm',
    'gigametre': 'Gm',
    'terametre': 'Tm',
    'petametre': 'Pm',
    'exametre': 'Em',
    'zettametre': 'Zm',
    'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    'm': 0,
    'km': 3,
    'Mm': 6,
    'Gm': 9,
    'Tm': 12,
    'Pm': 15,
    'Em': 18,
    'Zm': 21,
    'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between metric units.

    Accepts full unit names (optionally pluralised, e.g. "meters") or SI
    symbols (e.g. "km").

    >>> length_conversion(1, "meter", "kilometer")
    0.001
    >>> length_conversion(1, "km", "m")
    1000

    :raises ValueError: if either unit is not a known name or symbol.
    """
    from_sanitized = from_type.lower().strip('s')
    to_sanitized = to_type.lower().strip('s')
    # Full names translate to their symbol; symbols pass through unchanged.
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    # Both original branches computed from_exponent - to_exponent; simplified here.
    exponent = from_exponent - to_exponent
    return value * pow(10, exponent)
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 110
| 1
|
from manim import *
class _UpperCAmelCase ( A__ ):
    """Manim scene: animate loading an empty model skeleton into CPU memory.

    Draws CPU/GPU/Model memory blocks, then moves highlighted cells from the
    model region over to the CPU region.

    NOTE(review): local names appear mangled (`lowercase__` is rebound
    repeatedly while later lines read `cpu`, `gpu`, `model`, `cpu_targs`,
    etc.) — confirm bindings against the upstream source before running.
    """

    def lowercase__ ( self : Optional[int] ):
        '''Build and play the animation (scene construction entry point).'''
        # Memory-cell templates: outer bordered square and a slightly smaller fill square.
        lowercase__ = Rectangle(height=0.5, width=0.5 )
        lowercase__ = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
        # Two columns of six cells form the CPU memory block.
        lowercase__ = [mem.copy() for i in range(6 )]
        lowercase__ = [mem.copy() for i in range(6 )]
        lowercase__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
        lowercase__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
        lowercase__ = VGroup(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
        lowercase__ = Text('''CPU''', font_size=24 )
        lowercase__ = Group(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0.5, aligned_edge=lowerCamelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(lowerCamelCase )
        # Single-cell GPU block, placed left of the CPU block.
        lowercase__ = [mem.copy() for i in range(1 )]
        lowercase__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
        lowercase__ = Text('''GPU''', font_size=24 )
        lowercase__ = Group(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0.5, aligned_edge=lowerCamelCase )
        gpu.align_to(lowerCamelCase, lowerCamelCase )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(lowerCamelCase )
        # Six-cell "Model" block to the right; its cells are animated into the CPU later.
        lowercase__ = [mem.copy() for i in range(6 )]
        lowercase__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
        lowercase__ = Text('''Model''', font_size=24 )
        lowercase__ = Group(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0.5, aligned_edge=lowerCamelCase )
        model.move_to([3, -1.0, 0] )
        self.play(
            Create(lowerCamelCase, run_time=1 ), Create(lowerCamelCase, run_time=1 ), Create(lowerCamelCase, run_time=1 ), )
        # Caption and legend ("key") explaining the highlighted empty-model cells.
        lowercase__ = MarkupText(
            F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""", font_size=24, )
        lowercase__ = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowercase__ = MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=18, )
        key_text.move_to([-5, 2.4, 0] )
        step_a.move_to([2, 2, 0] )
        self.play(Write(lowerCamelCase, run_time=2.5 ), Write(lowerCamelCase ), Write(lowerCamelCase ) )
        self.add(lowerCamelCase )
        # For each model cell: create a filled target square and move it into the CPU grid.
        lowercase__ = []
        lowercase__ = []
        lowercase__ = []
        for i, rect in enumerate(lowerCamelCase ):
            lowercase__ = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase, opacity=0.7 )
            cpu_target.move_to(lowerCamelCase )
            cpu_target.generate_target()
            lowercase__ = 0.46 / 4
            lowercase__ = 0.46 / 3
            # First cell anchors to the CPU column corner; cell 3 starts the second row.
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=lowerCamelCase )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=lowerCamelCase, buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=lowerCamelCase, buff=0.0 )
            cpu_targs.append(lowerCamelCase )
            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase ) )
            second_animations.append(MoveToTarget(lowerCamelCase, run_time=1.5 ) )
        # Play highlight animations first, then the move-to-CPU animations.
        self.play(*lowerCamelCase )
        self.play(*lowerCamelCase )
        self.wait()
| 671
|
class SubArray:
    """Maximum sub-array sum over a comma-separated list of numbers (Kadane-style DP)."""

    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(',')

    def solve_sub_array(self):
        """Return the maximum sum over all non-empty contiguous sub-arrays."""
        # sum_value[i]: best sum of a sub-array ending exactly at index i.
        sum_value = [int(self.array[0])] * len(self.array)
        # rear[i]: best sub-array sum anywhere within array[: i + 1].
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]

    # Backward-compatible alias for the previous (mangled) method name.
    lowercase__ = solve_sub_array
if __name__ == "__main__":
    # Read a comma-separated number list and print its maximum sub-array sum.
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    result = array.solve_sub_array()
    print(('the results is:', result))
| 671
| 1
|
from math import factorial
def solution(n: int = 20) -> int:
    """Project Euler 15: number of lattice paths through an n x n grid, C(2n, n).

    >>> solution(1)
    2
    >>> solution(20)
    137846528820
    """
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
    import sys

    # Default to n=20 unless an integer is supplied on the command line.
    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number.')
| 625
|
from math import factorial
def solution(n: int = 20) -> int:
    """Return the central binomial coefficient C(2n, n) (Project Euler 15).

    >>> solution(2)
    6
    """
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
    import sys

    # Default to n=20 unless an integer is supplied on the command line.
    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number.')
| 625
| 1
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# Find the fork point with main, list files modified since then, and keep only
# .py files under the requested top-level directories (passed as argv).
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = (
    subprocess.check_output(F"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode('''utf-8''').split()
)
joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(rF"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output is consumed by Makefile commands.
print(''' '''.join(relevant_modified_files), end='''''')
| 713
|
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    """Return per-process waiting times under preemptive SJF (shortest remaining time first).

    Simulates one time unit per iteration, always running the arrived process
    with the least remaining burst time.
    """
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999_999_999  # sentinel "infinity" for the minimum remaining time
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            # No process has arrived yet: idle one time unit.
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    """Return per-process turnaround times: burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time, turn_around_time, no_of_processes):
    """Print the average waiting time and average turnaround time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(F"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""")
    print('''Average turn around time =''', total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    # Interactive driver: read processes, compute SJF times, print a summary table.
    print('''Enter how many process you want to analyze''')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            '''Process''',
            '''BurstTime''',
            '''ArrivalTime''',
            '''WaitingTime''',
            '''TurnAroundTime''',
        ],
    )
    # Printing the dataFrame
    pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
    print(fcfs)
| 333
| 0
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Build an UnCLIP image-variation pipeline from a pretrained txt2img UnCLIP
    # pipeline plus a CLIP image encoder, then save it to --dump_path.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )
    args = parser.parse_args()
    # argparse exposes `--txt2img_unclip` as the attribute `txt2img_unclip`.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
| 64
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
UpperCamelCase_ = object()
# For specifying empty leaf dict `{}`
UpperCamelCase_ = object()
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int ) -> Optional[int]:
lowercase : Optional[Any] =tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(__magic_name__ ) - len(__magic_name__ ) + 1 ):
lowercase : Union[str, Any] =[x.match(__magic_name__ ) for x, y in zip(__magic_name__ , ks[i:] )]
if matches and all(__magic_name__ ):
return True
return False
def _replacement_rules(rules):
    """Build a (key, value) -> replacement function from (pattern, replacement) rules.

    The first rule whose pattern matches the key wins; otherwise the original
    value is returned unchanged.
    """
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    """Return (key-pattern, PartitionSpec) rules for model-parallel ("mp") sharding."""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''', None)),
        (("transformer", "wte", "embedding"), P('''mp''', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, '''mp''')),
        (("attention", "out_proj", "kernel"), P('''mp''', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, '''mp''')),
        (("mlp", "c_fc", "bias"), P('''mp''')),
        (("mlp", "c_proj", "kernel"), P('''mp''', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    """Assign a PartitionSpec to every parameter leaf of `in_dict`.

    Raises AssertionError if any leaf has no matching partition rule.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # Start every leaf as unmatched, then apply the first matching rule.
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 92
| 0
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase_ = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the model input dict, deriving masks from pad tokens when absent.

    Note: mirrors the original — the returned "decoder_attention_mask" entry is
    the encoder-side `attention_mask`, and the head masks are computed but not
    returned.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    """Builds tiny Blenderbot configs/inputs and checks cached vs. uncached decoding."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        """Return a tiny BlenderbotConfig plus a matching inputs dict (eos-terminated ids)."""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Incremental (cached) decoding must match a full forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        # Decode all but the last token with a fresh cache...
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        # ...then the final token reusing the populated cache.
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as check_use_cache_forward, but with an explicit padded attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        # Pad the attention mask with zeros out to the cache length.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    """Shape/behaviour checks for the LM head and `shift_tokens_right`."""

    vocab_size = 99

    def _get_config_and_data(self):
        """Return a tiny config, a fixed batch of eos-terminated input ids, and the batch size."""
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)

    def test_lm_uneven_forward(self):
        # Encoder and decoder sequences of different lengths.
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        # Shifting consumes exactly one pad token and prepends the start token.
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    """Common-mixin test suite plus jit encode/decode and slow integration checks."""

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        # Encoder output shapes must agree with and without jax.jit.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        # Decoder output shapes must agree with and without jax.jit.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict['input_ids'], inputs_dict['attention_mask'])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest('JIT Enabled'):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot-400M-distill')
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != 'cpu', '3B test too slow on CPU.')
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B', from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors='jax')
        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = "Sam is a great name. It means \"sun\" in Gaelic."
        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 705
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Module-level tokenizer constants; the tokenizer class below reads these by name.
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 5_1_2,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = '▁'
class A_ ( PreTrainedTokenizer ):
    """
    CamemBERT tokenizer, backed by a SentencePiece BPE model.

    Mirrors fairseq's vocabulary layout: four special tokens occupy the first
    ids, so every raw sentencepiece id is shifted by ``self.fairseq_offset``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the
        # sentencepiece vocabulary (this is the case for <s> and </s>).
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """CamemBERT does not use token type ids; return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        # The four fairseq specials precede the sentencepiece vocabulary.
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id, honoring the fairseq offset."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id to a token (str), honoring the fairseq offset."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Join sub-tokens back into text, decoding sentencepiece runs between special tokens."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and re-load in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the sentencepiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 230
| 0
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Import structure consumed by ``_LazyModule``; optional entries are appended
# below only when their dependencies are available.
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 105
|
"""simple docstring"""
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase = """"""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
lowerCAmelCase = remove_duplicates(key.upper() )
lowerCAmelCase = len(SCREAMING_SNAKE_CASE )
# First fill cipher with key characters
lowerCAmelCase = {alphabet[i]: char for i, char in enumerate(SCREAMING_SNAKE_CASE )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(SCREAMING_SNAKE_CASE ) , 26 ):
lowerCAmelCase = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
lowerCAmelCase = alphabet[i - offset]
lowerCAmelCase = char
return cipher_alphabet
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : dict[str, str] ):
'''simple docstring'''
return "".join(cipher_map.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for ch in message.upper() )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : dict[str, str] ):
'''simple docstring'''
lowerCAmelCase = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for ch in message.upper() )
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = input("""Enter message to encode or decode: """ ).strip()
lowerCAmelCase = input("""Enter keyword: """ ).strip()
lowerCAmelCase = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
try:
lowerCAmelCase = {"""e""": encipher, """d""": decipher}[option]
except KeyError:
raise KeyError("""invalid input option""" )
lowerCAmelCase = create_cipher_map(SCREAMING_SNAKE_CASE )
print(func(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 532
| 0
|
class lowercase :
    """Flow network described by an adjacency-matrix ``graph``.

    NOTE(review): throughout this class the original ``self.<attr> = ...``
    assignments appear to have been mangled into local ``UpperCamelCase = ...``
    bindings, so ``__init__`` and the normalization step no longer store any
    state on the instance, while other methods read ``self.maximum_flow_algorithm``,
    ``self.source_index`` etc. — flagged for restoration. All helper methods
    also share the mangled name ``__UpperCamelCase`` (later defs shadow earlier).
    """

    def __init__( self , A_ , A_ , A_ ) -> Union[str, Any]:
        """Build the network from ``graph`` plus source/sink vertex lists."""
        UpperCamelCase = None
        UpperCamelCase = None
        UpperCamelCase = graph
        self._normalize_graph(A_ , A_ )
        UpperCamelCase = len(A_ )
        UpperCamelCase = None

    def __UpperCamelCase ( self , A_ , A_ ) -> str:
        """Normalize sources/sinks; add a super-source/super-sink when several are given."""
        # NOTE(review): ``sources is int`` is an identity test against the type
        # object and is always False for a list argument; the intent was
        # presumably ``isinstance(sources, int)`` — TODO confirm.
        if sources is int:
            UpperCamelCase = [sources]
        if sinks is int:
            UpperCamelCase = [sinks]
        if len(A_ ) == 0 or len(A_ ) == 0:
            return
        UpperCamelCase = sources[0]
        UpperCamelCase = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(A_ ) > 1 or len(A_ ) > 1:
            UpperCamelCase = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            UpperCamelCase = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 , 0 )
            self.graph.insert(0 , [0] * size )
            for i in sources:
                UpperCamelCase = max_input_flow
            UpperCamelCase = 0
            UpperCamelCase = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                UpperCamelCase = max_input_flow
            UpperCamelCase = size - 1

    def __UpperCamelCase ( self ) -> Dict:
        """Run the configured algorithm and return the maximum-flow value."""
        if self.maximum_flow_algorithm is None:
            raise Exception('You need to set maximum flow algorithm before.' )
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def __UpperCamelCase ( self , A_ ) -> List[Any]:
        """Instantiate ``algorithm`` bound to this network (setter for the executor)."""
        UpperCamelCase = algorithm(self )
class lowercase :
    """Base class for flow-network algorithm executors.

    NOTE(review): the ``UpperCamelCase = ...`` lines in ``__init__`` look like
    mangled ``self.<attr> = ...`` assignments (cf. the reads ``self.executed``,
    ``self.graph``, ``self._algorithm`` below); as written the constructor
    stores nothing on the instance.
    """

    def __init__( self , A_ ) -> List[Any]:
        """Capture the network's vertex count, source/sink indices and graph reference."""
        UpperCamelCase = flow_network
        UpperCamelCase = flow_network.verticesCount
        UpperCamelCase = flow_network.sourceIndex
        UpperCamelCase = flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        UpperCamelCase = flow_network.graph
        UpperCamelCase = False

    def __UpperCamelCase ( self ) -> Dict:
        """Run ``_algorithm`` once; subsequent calls are no-ops."""
        if not self.executed:
            self._algorithm()
            UpperCamelCase = True

    def __UpperCamelCase ( self ) -> Dict:
        """Hook implemented by concrete executors."""
        pass
class lowercase ( _SCREAMING_SNAKE_CASE ):
    """Executor exposing the computed maximum flow.

    NOTE(review): ``UpperCamelCase = -1`` below looks like a mangled
    ``self.maximum_flow = -1`` (``self.maximum_flow`` is read by the getter).
    """

    def __init__( self , A_ ) -> List[Any]:
        super().__init__(A_ )
        # use this to save your result
        UpperCamelCase = -1

    def __UpperCamelCase ( self ) -> Optional[int]:
        """Return the flow computed by ``execute``; raise if not executed yet."""
        if not self.executed:
            raise Exception('You should execute algorithm before using its result!' )
        return self.maximum_flow
class lowercase ( _SCREAMING_SNAKE_CASE ):
    """Push-relabel (relabel-to-front) maximum-flow executor.

    NOTE(review): the ``UpperCamelCase = ...`` lines below look like mangled
    assignments to ``self.preflow`` / ``self.heights`` / ``self.excesses`` and
    to locals such as ``vertices_list`` / ``i`` / ``vertex_index`` /
    ``previous_height`` / ``min_height`` / ``preflow_delta``; as written the
    state the methods read is never stored. All methods also share the mangled
    name ``__UpperCamelCase``.
    """

    def __init__( self , A_ ) -> List[str]:
        super().__init__(A_ )
        # preflow matrix, per-vertex heights and per-vertex excess flow
        UpperCamelCase = [[0] * self.verticies_count for i in range(self.verticies_count )]
        UpperCamelCase = [0] * self.verticies_count
        UpperCamelCase = [0] * self.verticies_count

    def __UpperCamelCase ( self ) -> List[str]:
        """Relabel-to-front main loop: saturate the source edges, then discharge vertices."""
        UpperCamelCase = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        UpperCamelCase = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        UpperCamelCase = 0
        while i < len(A_ ):
            UpperCamelCase = vertices_list[i]
            UpperCamelCase = self.heights[vertex_index]
            self.process_vertex(A_ )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(A_ ) )
                UpperCamelCase = 0
            else:
                i += 1
        UpperCamelCase = sum(self.preflow[self.source_index] )

    def __UpperCamelCase ( self , A_ ) -> List[Any]:
        """Discharge ``vertex_index``: push to admissible neighbours, else relabel."""
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(A_ , A_ )
            self.relabel(A_ )

    def __UpperCamelCase ( self , A_ , A_ ) -> Dict:
        """Push min(excess, residual capacity) along ``from_index -> to_index``."""
        UpperCamelCase = min(
            self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def __UpperCamelCase ( self , A_ ) -> Tuple:
        """Raise the vertex label to one above its lowest admissible neighbour."""
        UpperCamelCase = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                UpperCamelCase = self.heights[to_index]
        if min_height is not None:
            UpperCamelCase = min_height + 1
if __name__ == "__main__":
_UpperCAmelCase : str = [0]
_UpperCAmelCase : List[str] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
_UpperCAmelCase : Optional[Any] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
_UpperCAmelCase : Any = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
_UpperCAmelCase : str = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
| 3
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
# Module logger; ``_encode_prompt`` below calls ``logger.warning``, so the
# mangled binding (``_UpperCAmelCase``) left ``logger`` undefined.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """
    Utility class for storing (optionally learned) text embeddings used for
    classifier-free sampling.

    Args:
        learnable: whether the embeddings are trainable parameters.
        hidden_size: embedding width; required when ``learnable`` is True.
        length: number of embedding vectors; required when ``learnable`` is True.
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        # nn.Parameter(None) yields an empty (uninitialized) parameter when not learnable.
        self.embeddings = torch.nn.Parameter(embeddings)
class lowercase ( _SCREAMING_SNAKE_CASE ):
    """Text-to-image generation pipeline in the style of VQ-Diffusion.

    NOTE(review): name mangling has damaged this class — every local result is
    bound to ``UpperCamelCase`` while later lines read the original names
    (``text_inputs``, ``prompt_embeds``, ``batch_size``, ...), all helper
    methods share the name ``__UpperCamelCase``, and several ``A_`` arguments
    in calls/signatures are duplicated (a SyntaxError). The annotations below
    reference ``LearnedClassifierFreeSamplingEmbeddings`` which is not defined
    under that name above. Flagged for restoration against upstream.
    """

    # Components registered via ``register_modules`` in ``__init__``.
    __lowercase : VQModel
    __lowercase : CLIPTextModel
    __lowercase : CLIPTokenizer
    __lowercase : TransformeraDModel
    __lowercase : LearnedClassifierFreeSamplingEmbeddings
    __lowercase : VQDiffusionScheduler

    def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
        """Register the vqvae / transformer / text-encoder / tokenizer / scheduler components."""
        super().__init__()
        self.register_modules(
            vqvae=A_ , transformer=A_ , text_encoder=A_ , tokenizer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )

    def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Union[str, Any]:
        """Encode ``prompt`` with CLIP; optionally prepend classifier-free (unconditional) embeddings."""
        UpperCamelCase = len(A_ ) if isinstance(A_ , A_ ) else 1
        # get prompt text embeddings
        UpperCamelCase = self.tokenizer(
            A_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        UpperCamelCase = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
        UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=A_ )
        # duplicate text embeddings for each generation per prompt
        UpperCamelCase = prompt_embeds.repeat_interleave(A_ , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
                UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(A_ , 1 , 1 )
            else:
                UpperCamelCase = [''] * batch_size
                UpperCamelCase = text_input_ids.shape[-1]
                UpperCamelCase = self.tokenizer(
                    A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='pt' , )
                UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=A_ )
                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                UpperCamelCase = negative_prompt_embeds.shape[1]
                UpperCamelCase = negative_prompt_embeds.repeat(1 , A_ , 1 )
                UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds

    @torch.no_grad()
    def __call__( self , A_ , A_ = 100 , A_ = 5.0 , A_ = 1.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , A_ = None , A_ = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the denoising loop over discrete latents and decode them to images."""
        if isinstance(A_ , A_ ):
            UpperCamelCase = 1
        elif isinstance(A_ , A_ ):
            UpperCamelCase = len(A_ )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A_ )}''' )
        UpperCamelCase = batch_size * num_images_per_prompt
        UpperCamelCase = guidance_scale > 1.0
        UpperCamelCase = self._encode_prompt(A_ , A_ , A_ )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(A_ )}.''' )
        # get the initial completely masked latents unless the user supplied it
        UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            UpperCamelCase = self.transformer.num_vector_embeds - 1
            UpperCamelCase = torch.full(A_ , A_ ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
                    F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            UpperCamelCase = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(A_ , device=self.device )
        UpperCamelCase = self.scheduler.timesteps.to(self.device )
        UpperCamelCase = latents
        for i, t in enumerate(self.progress_bar(A_ ) ):
            # expand the sample if we are doing classifier free guidance
            UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            UpperCamelCase = self.transformer(A_ , encoder_hidden_states=A_ , timestep=A_ ).sample
            if do_classifier_free_guidance:
                UpperCamelCase , UpperCamelCase = model_output.chunk(2 )
                UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(A_ , dim=1 , keepdim=A_ )
            UpperCamelCase = self.truncate(A_ , A_ )
            # remove `log(0)`'s (`-inf`s)
            UpperCamelCase = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            UpperCamelCase = self.scheduler.step(A_ , timestep=A_ , sample=A_ , generator=A_ ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(A_ , A_ , A_ )
        UpperCamelCase = self.vqvae.config.vq_embed_dim
        UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        UpperCamelCase = self.vqvae.quantize.get_codebook_entry(A_ , shape=A_ )
        UpperCamelCase = self.vqvae.decode(A_ , force_not_quantize=A_ ).sample
        UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
        UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            UpperCamelCase = self.numpy_to_pil(A_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=A_ )

    def __UpperCamelCase ( self , A_ , A_ ) -> torch.FloatTensor:
        """Zero out (in log space) probabilities outside the top ``truncation_rate`` cumulative mass."""
        UpperCamelCase , UpperCamelCase = torch.sort(A_ , 1 , descending=A_ )
        UpperCamelCase = torch.exp(A_ )
        UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , A_ )
        UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
        UpperCamelCase = keep_mask[:, :-1, :]
        UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
        UpperCamelCase = log_p_x_0.clone()
        UpperCamelCase = -torch.inf # -inf = log(0)
        return rv
| 3
| 1
|
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = """▁"""
__UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
    """Test-suite for ``BertGenerationTokenizer`` against a small sentencepiece fixture.

    NOTE(review): name mangling has damaged this module — the mixin base
    ``lowercase_`` and the ``lowerCamelCase_`` arguments used in many calls are
    not defined under these names, the three class attributes below all share
    one name (only the last assignment survives), and every test method is
    named ``lowerCamelCase_``. The attributes were presumably
    ``tokenizer_class`` / ``test_rust_tokenizer`` / ``test_sentencepiece`` —
    confirm against upstream before relying on this file.
    """

    SCREAMING_SNAKE_CASE__ = BertGenerationTokenizer  # tokenizer class under test
    SCREAMING_SNAKE_CASE__ = False  # no fast/rust tokenizer counterpart
    SCREAMING_SNAKE_CASE__ = True  # sentencepiece-backed

    def lowerCamelCase_ ( self : List[str] ):
        """Build a tokenizer from the sentencepiece fixture and save it to ``tmpdirname``."""
        super().setUp()
        SCREAMING_SNAKE_CASE : Optional[int] = BertGenerationTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCamelCase_ ( self : Any ):
        """``<s>`` should round-trip through token<->id conversion as id 1."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = """<s>"""
        SCREAMING_SNAKE_CASE : Optional[int] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )

    def lowerCamelCase_ ( self : Any ):
        """Check first/last vocab entries and the total vocab size (1002)."""
        SCREAMING_SNAKE_CASE : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<unk>""" )
        self.assertEqual(vocab_keys[1] , """<s>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(lowerCamelCase_ ) , 10_02 )

    def lowerCamelCase_ ( self : List[str] ):
        """Base sentencepiece vocab size of the fixture model is 1000."""
        self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )

    def lowerCamelCase_ ( self : Optional[int] ):
        """Full tokenizer pass: pieces, ids, and <unk> handling for OOV pieces."""
        SCREAMING_SNAKE_CASE : List[Any] = BertGenerationTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : int = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(lowerCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [2_85, 46, 10, 1_70, 3_82] , )
        SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            lowerCamelCase_ , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
        self.assertListEqual(
            lowerCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
        self.assertListEqual(
            lowerCamelCase_ , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ] , )

    @cached_property
    def lowerCamelCase_ ( self : List[Any] ):
        """Pretrained tokenizer used by the slow integration tests below."""
        return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )

    @slow
    def lowerCamelCase_ ( self : List[str] ):
        """Pinned encoding of a short string with the pretrained tokenizer."""
        SCREAMING_SNAKE_CASE : Dict = """Hello World!"""
        SCREAMING_SNAKE_CASE : Any = [1_85_36, 22_60, 1_01]
        self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )

    @slow
    def lowerCamelCase_ ( self : Dict ):
        """Pinned encoding of a long string with odd characters and OOV words."""
        SCREAMING_SNAKE_CASE : Tuple = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
        )
        SCREAMING_SNAKE_CASE : Tuple = [
            8_71,
            4_19,
            3_58,
            9_46,
            9_91,
            25_21,
            4_52,
            3_58,
            13_57,
            3_87,
            77_51,
            35_36,
            1_12,
            9_85,
            4_56,
            1_26,
            8_65,
            9_38,
            54_00,
            57_34,
            4_58,
            13_68,
            4_67,
            7_86,
            24_62,
            52_46,
            11_59,
            6_33,
            8_65,
            45_19,
            4_57,
            5_82,
            8_52,
            25_57,
            4_27,
            9_16,
            5_08,
            4_05,
            3_43_24,
            4_97,
            3_91,
            4_08,
            1_13_42,
            12_44,
            3_85,
            1_00,
            9_38,
            9_85,
            4_56,
            5_74,
            3_62,
            1_25_97,
            32_00,
            31_29,
            11_72,
        ]
        self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )

    @require_torch
    @slow
    def lowerCamelCase_ ( self : str ):
        """Smoke-test that encoded batches feed through a ``BertGenerationEncoder``."""
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        SCREAMING_SNAKE_CASE : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
        SCREAMING_SNAKE_CASE : Tuple = """ """.join(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Dict = self.big_tokenizer.encode_plus(lowerCamelCase_ , return_tensors="""pt""" , return_token_type_ids=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Optional[Any] = self.big_tokenizer.batch_encode_plus(
            [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : str = BertGenerationConfig()
        SCREAMING_SNAKE_CASE : Optional[Any] = BertGenerationEncoder(lowerCamelCase_ )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**lowerCamelCase_ )
            model(**lowerCamelCase_ )

    @slow
    def lowerCamelCase_ ( self : Tuple ):
        """Integration check of full padded encodings against pinned expected ids."""
        # fmt: off
        SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase_ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 379
|
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
# Maps args.num_image_embeds -> (rows, cols) pooling grid whose product equals that count.
# The mangled binding left ``POOLING_BREAKDOWN`` (read by the image encoder below) undefined.
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    """Pool ResNet-152 feature maps into ``args.num_image_embeds`` embeddings of size 2048.

    Fixes the garbled API names from the mangled version:
    ``torchvision.models.resnetaaa`` -> ``resnet152`` and
    ``nn.AdaptiveAvgPoolad`` -> ``nn.AdaptiveAvgPool2d``.
    """

    def __init__(self, args):
        super().__init__()
        # Backbone without the final avgpool/fc layers -> Bx2048x7x7 feature maps.
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        # Pool to a small grid whose cell count equals num_image_embeds.
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, input_modal):
        """Bx3x224x224 -> BxNx2048 where N == args.num_image_embeds."""
        out = self.pool(self.model(input_modal))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class UpperCamelCase__ ( lowercase_ ):
    """jsonl-backed dataset of (text, image, multi-hot label) examples (MM-IMDB style).

    NOTE(review): the ``SCREAMING_SNAKE_CASE : ... = ...`` lines in ``__init__``
    look like mangled ``self.<attr> = ...`` assignments — the methods below read
    ``self.data`` / ``self.data_dir`` / ``self.tokenizer`` / ``self.n_classes`` /
    ``self.max_seq_length`` / ``self.transforms`` which are never stored. All
    five constructor parameters also share the mangled name ``lowerCamelCase_``
    (a SyntaxError), and locals such as ``sentence`` / ``label`` / ``image``
    are read but never bound. Flagged for restoration.
    """

    def __init__( self : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ):
        """Load the jsonl file and cache tokenizer / label metadata."""
        SCREAMING_SNAKE_CASE : Tuple = [json.loads(lowerCamelCase_ ) for l in open(lowerCamelCase_ )]
        SCREAMING_SNAKE_CASE : Optional[Any] = os.path.dirname(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Tuple = tokenizer
        SCREAMING_SNAKE_CASE : Union[str, Any] = labels
        SCREAMING_SNAKE_CASE : str = len(lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : int = max_seq_length
        SCREAMING_SNAKE_CASE : List[Any] = transforms

    def __len__( self : Any ):
        """Number of examples in the jsonl file."""
        return len(self.data )

    def __getitem__( self : Optional[int] , lowerCamelCase_ : List[str] ):
        """Return dict with truncated token ids, image tensor and a multi-hot label vector."""
        SCREAMING_SNAKE_CASE : List[Any] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=lowerCamelCase_ ) )
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = sentence[0], sentence[1:-1], sentence[-1]
        SCREAMING_SNAKE_CASE : int = sentence[: self.max_seq_length]
        SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(self.n_classes )
        SCREAMING_SNAKE_CASE : List[str] = 1
        SCREAMING_SNAKE_CASE : int = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" )
        SCREAMING_SNAKE_CASE : str = self.transforms(lowerCamelCase_ )
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def lowerCamelCase_ ( self : Optional[Any] ):
        """Frequency count over all labels occurring in the dataset."""
        SCREAMING_SNAKE_CASE : Any = Counter()
        for row in self.data:
            label_freqs.update(row["""label"""] )
        return label_freqs
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = [len(row["""sentence"""] ) for row in batch]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ ), max(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.zeros(lowerCamelCase_ , lowerCamelCase_ , dtype=torch.long )
SCREAMING_SNAKE_CASE : Any = torch.zeros(lowerCamelCase_ , lowerCamelCase_ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ ) ):
SCREAMING_SNAKE_CASE : Union[str, Any] = input_row["""sentence"""]
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : List[str] = torch.stack([row["""image"""] for row in batch] )
SCREAMING_SNAKE_CASE : int = torch.stack([row["""label"""] for row in batch] )
SCREAMING_SNAKE_CASE : str = torch.stack([row["""image_start_token"""] for row in batch] )
SCREAMING_SNAKE_CASE : Any = torch.stack([row["""image_end_token"""] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def __A ( ):
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def __A ( ):
    """Build the eval-time image preprocessing pipeline.

    Resize -> center-crop -> tensor -> per-channel normalize.
    """
    normalize = transforms.Normalize(
        mean=[0.46_777_044, 0.44_531_429, 0.40_661_017],
        std=[0.12_221_994, 0.12_145_835, 0.14_380_469],
    )
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]
    )
| 379
| 1
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __lowercase ( _UpperCAmelCase):
    """Dataset reader that builds a `datasets` Dataset from a PySpark DataFrame."""

    def __init__(
        self,
        df,
        split = None,
        features = None,
        streaming = True,
        cache_dir = None,
        keep_in_memory = False,
        working_dir = None,
        load_from_cache_file = True,
        file_format = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        # NOTE(review): the original assigned these to throwaway locals; the
        # reads below (`self._load_from_cache_file`, `self._file_format`,
        # `self.builder`) require real attributes.
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def __UpperCamelCase (self ):
        """Materialize (or stream) the dataset for `self.split`."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
| 719
|
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCAmelCase):
    """Manim scene: animates loading a checkpoint shard's weights into a model.

    NOTE(review): local names in this block appear machine-mangled — every
    assignment targets `snake_case_`, while later lines read names such as
    `cpu`, `gpu`, `model`, `fill`, `cpu_left_col_base`, `cpu_targs` that are
    never bound. As written the method would raise NameError at runtime;
    the comments below describe the apparent intent only.
    """
    def __UpperCamelCase (self ):
        # Build the memory-cell primitives (small squares) used for CPU/GPU/model.
        snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
        snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
        snake_case_ : str = [mem.copy() for i in range(6 )]
        snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : List[Any] = Text("""CPU""" , font_size=24 )
        snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
        # Place the CPU group on the left of the frame.
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(lowercase__ )
        # GPU group: 4 memory cells plus a label.
        snake_case_ : List[Any] = [mem.copy() for i in range(4 )]
        snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : List[str] = Text("""GPU""" , font_size=24 )
        snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
        gpu.move_to([-1, -1, 0] )
        self.add(lowercase__ )
        # Model group: 6 memory cells plus a label, placed on the right.
        snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )]
        snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : Dict = Text("""Model""" , font_size=24 )
        snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
        model.move_to([3, -1.0, 0] )
        self.add(lowercase__ )
        # Mark each model cell with a small filled target rectangle on the CPU.
        snake_case_ : Dict = []
        for i, rect in enumerate(lowercase__ ):
            rect.set_stroke(lowercase__ )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
            self.add(lowercase__ )
            cpu_targs.append(lowercase__ )
        # Checkpoint group: 6 memory cells plus a label, above the model.
        snake_case_ : List[str] = [mem.copy() for i in range(6 )]
        snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
        snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 )
        snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        # Legend ("Key") box in the top-left corner.
        snake_case_ : Optional[Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        snake_case_ : Union[str, Any] = MarkupText(
            f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(lowercase__ , lowercase__ )
        snake_case_ : List[Any] = MarkupText(
            f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
        blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        # Step caption shown at the top of the scene.
        snake_case_ : List[Any] = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(lowercase__ ) , Write(lowercase__ ) )
        self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
        # Animate each checkpoint cell growing, then moving onto a CPU slot.
        snake_case_ : Optional[int] = []
        snake_case_ : List[str] = []
        for i, rect in enumerate(lowercase__ ):
            snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
            target.move_to(lowercase__ )
            first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
            snake_case_ : List[Any] = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
        self.play(*lowercase__ )
        self.play(*lowercase__ )
        self.wait()
| 48
| 0
|
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 strictly below ``n``.

    Project Euler problem 1. NOTE(review): the original def was renamed to
    `solution` to match the call in the `__main__` guard below; the original
    body also read an undefined `n` instead of its parameter, and carried an
    unreachable `elif a % 15 == 0` branch (a % 15 == 0 implies a % 3 == 0).
    """
    total = 0
    for a in range(3, n):
        if a % 3 == 0 or a % 5 == 0:
            total += a
    return total


if __name__ == "__main__":
    print(F'{solution() = }')
| 468
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str) -> None:
    """Convert a roberta-prelayernorm Hub checkpoint to the transformers format.

    NOTE(review): renamed from the mangled `__A` to match the call in the
    `__main__` block below, which already invokes it under this name.
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    new_state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        new_state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=new_state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): the original assigned the parser and parsed args to a
    # throwaway name, leaving `parser`/`args` undefined; restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 468
| 1
|
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
_A : Optional[Any] =version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export(
    model,
    model_args,
    output_path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export `model` to an ONNX file at `output_path` via torch.onnx.export.

    NOTE(review): the original def had all parameters mangled to the same
    duplicated name (a SyntaxError); renamed to match the `onnx_export(...)`
    call sites later in this file.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Convert a Stable Diffusion checkpoint to an ONNX pipeline on disk.

    Exports text encoder, UNet, VAE encoder/decoder and (optionally) the
    safety checker, then reassembles and round-trips an
    OnnxStableDiffusionPipeline. NOTE(review): renamed from the mangled
    `__UpperCamelCase` to match the call in the `__main__` block below.
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA')
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        'A sample prompt',
        padding='max_length',
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors='pt',
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / 'text_encoder' / 'model.onnx',
        ordered_input_names=['input_ids'],
        output_names=['last_hidden_state', 'pooler_output'],
        dynamic_axes={
            'input_ids': {0: 'batch', 1: 'sequence'},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET — exported with external data, then re-collated into one file.
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / 'unet' / 'model.onnx'
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'],
        output_names=['out_sample'],
        dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
            'timestep': {0: 'batch'},
            'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
        },
        opset=opset,
        use_external_data_format=True,
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location='weights.pb',
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / 'vae_encoder' / 'model.onnx',
        ordered_input_names=['sample', 'return_dict'],
        output_names=['latent_sample'],
        dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / 'vae_decoder' / 'model.onnx',
        ordered_input_names=['latent_sample', 'return_dict'],
        output_names=['sample'],
        dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, clip_image_size, clip_image_size, clip_num_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / 'safety_checker' / 'model.onnx',
            ordered_input_names=['clip_input', 'images'],
            output_names=['out_images', 'has_nsfw_concepts'],
            dynamic_axes={
                'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
                'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker')
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder'),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder'),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder'),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet'),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print('ONNX pipeline saved to', output_path)

    del pipeline
    del onnx_pipeline
    # Reload as a sanity check that the exported pipeline is usable.
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider='CPUExecutionProvider')
    print('ONNX pipeline is loadable')
if __name__ == "__main__":
    # NOTE(review): parser/args were assigned to a throwaway name in the
    # original, and it read the non-existent `args.fpaa` — argparse stores
    # `--fp16` under `args.fp16`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path',
        type=str,
        required=True,
        help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
    )
    parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--opset',
        default=14,
        type=int,
        help='The version of the ONNX operator set to use.',
    )
    parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 717
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: map submodule name -> public names it defines.
# NOTE(review): the original clobbered a single throwaway name on every
# assignment and never installed the lazy module; restored to the standard
# transformers `_LazyModule` pattern that `_import_structure` below requires.
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

# Under type checking we import everything eagerly so static analysis sees it;
# at runtime the module is replaced by a _LazyModule that defers the imports.
if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
    """Integration test for the TF CamemBERT base checkpoint."""

    @slow
    def _a ( self : List[str] ) -> Tuple:
        # NOTE(review): the original bound every value to a throwaway name;
        # restored `model`/`input_ids`/`output`/`expected_*`, and the mangled
        # `tf.intaa`/`tf.floataa` dtypes (int32 / float32).
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 68
|
from math import factorial, radians
def __A ( __lowerCamelCase , __lowerCamelCase = 18 , __lowerCamelCase = 10 ) -> float:
a = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
a = radians(__lowerCamelCase )
a = angle_in_radians
a = 3
a = -1
for _ in range(__lowerCamelCase ):
result += (b * (angle_in_radians**a)) / factorial(__lowerCamelCase )
a = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
__import__("doctest").testmod()
| 468
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : int = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __snake_case (_a ):
    """Configuration class for a Wav2Vec2 model.

    NOTE(review): the original `__init__` duplicated every parameter name (a
    SyntaxError) and assigned all values to a throwaway local, so no `self.*`
    attribute read by the validation below or by the property was ever set.
    Parameter names/defaults restored from the attribute reads in this block.
    """

    lowerCAmelCase__ = "wav2vec2"  # NOTE(review): upstream names this `model_type`; identifier kept for compatibility

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
        """Product of all conv strides — audio samples consumed per output frame."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 196
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : List[str] ) -> List[str]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
_lowerCAmelCase : List[Any] = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : Any = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : str = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
'''simple docstring'''
_lowerCAmelCase : List[str] = """sgugger/tiny-distilbert-classification"""
_lowerCAmelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , only_pretrain_model=_UpperCAmelCase , )
_lowerCAmelCase : List[Any] = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
'''simple docstring'''
_lowerCAmelCase : Tuple = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , torchscript=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : List[str] = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , fpaa=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : Union[str, Any] = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
'''simple docstring'''
_lowerCAmelCase : List[Any] = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : int = AutoConfig.from_pretrained(_UpperCAmelCase )
# set architectures equal to `None`
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : Tuple = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
_lowerCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
    """Benchmark training for a tiny GPT-2 and assert train timing/memory results are populated.

    NOTE(review): obfuscated — several names read here are never bound; verify against upstream.
    """
    _lowerCAmelCase : str = """sshleifer/tiny-gpt2"""
    _lowerCAmelCase : List[str] = PyTorchBenchmarkArguments(
        models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
    _lowerCAmelCase : List[str] = PyTorchBenchmark(_UpperCAmelCase )
    _lowerCAmelCase : Union[str, Any] = benchmark.run()
    self.check_results_dict_not_empty(results.time_train_result )
    self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
    """Benchmark fp16 (`fpaa`) training for a tiny GPT-2.

    NOTE(review): obfuscated — several names read here are never bound; verify against upstream.
    """
    _lowerCAmelCase : int = """sshleifer/tiny-gpt2"""
    _lowerCAmelCase : Dict = PyTorchBenchmarkArguments(
        models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
    _lowerCAmelCase : Union[str, Any] = PyTorchBenchmark(_UpperCAmelCase )
    _lowerCAmelCase : List[str] = benchmark.run()
    self.check_results_dict_not_empty(results.time_train_result )
    self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
    """Benchmark inference with an explicitly supplied model config (`configs=[config]`).

    NOTE(review): obfuscated — several names read here are never bound; verify against upstream.
    """
    _lowerCAmelCase : Union[str, Any] = """sshleifer/tiny-gpt2"""
    _lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(_UpperCAmelCase )
    _lowerCAmelCase : List[Any] = PyTorchBenchmarkArguments(
        models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
    _lowerCAmelCase : List[str] = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
    _lowerCAmelCase : Optional[int] = benchmark.run()
    self.check_results_dict_not_empty(results.time_inference_result )
    self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
    """Benchmark inference for a tiny BART encoder-decoder with an explicit config.

    NOTE(review): obfuscated — several names read here are never bound; verify against upstream.
    """
    _lowerCAmelCase : Any = """sshleifer/tinier_bart"""
    _lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_UpperCAmelCase )
    _lowerCAmelCase : Dict = PyTorchBenchmarkArguments(
        models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
    _lowerCAmelCase : Tuple = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
    _lowerCAmelCase : Any = benchmark.run()
    self.check_results_dict_not_empty(results.time_inference_result )
    self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
    """Benchmark training for a tiny GPT-2 with an explicit config.

    NOTE(review): obfuscated — several names read here are never bound; verify against upstream.
    """
    _lowerCAmelCase : List[Any] = """sshleifer/tiny-gpt2"""
    _lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_UpperCAmelCase )
    _lowerCAmelCase : Any = PyTorchBenchmarkArguments(
        models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
    _lowerCAmelCase : Optional[Any] = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
    _lowerCAmelCase : Optional[Any] = benchmark.run()
    self.check_results_dict_not_empty(results.time_train_result )
    self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
    """Benchmark training for a tiny BART with an explicit config.

    NOTE(review): obfuscated — several names read here are never bound; verify against upstream.
    """
    _lowerCAmelCase : Any = """sshleifer/tinier_bart"""
    _lowerCAmelCase : Dict = AutoConfig.from_pretrained(_UpperCAmelCase )
    _lowerCAmelCase : Any = PyTorchBenchmarkArguments(
        models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
    _lowerCAmelCase : Optional[Any] = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
    _lowerCAmelCase : int = benchmark.run()
    self.check_results_dict_not_empty(results.time_train_result )
    self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
    """Run the benchmark with CSV export enabled and assert every expected CSV file
    (inference/train time & memory, env info) was written to the temp dir.

    NOTE(review): obfuscated — `MODEL_ID`, `_UpperCAmelCase` and `benchmark` are read
    but never bound under those names; verify against upstream.
    """
    _lowerCAmelCase : Tuple = """sshleifer/tiny-gpt2"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        _lowerCAmelCase : Optional[int] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , save_to_csv=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_UpperCAmelCase , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(_UpperCAmelCase , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(_UpperCAmelCase , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(_UpperCAmelCase , """train_time.csv""" ) , env_info_csv_file=os.path.join(_UpperCAmelCase , """env.csv""" ) , multi_process=_UpperCAmelCase , )
        _lowerCAmelCase : List[Any] = PyTorchBenchmark(_UpperCAmelCase )
        benchmark.run()
        # Every configured CSV target must exist after the run.
        self.assertTrue(Path(os.path.join(_UpperCAmelCase , """inf_time.csv""" ) ).exists() )
        self.assertTrue(Path(os.path.join(_UpperCAmelCase , """train_time.csv""" ) ).exists() )
        self.assertTrue(Path(os.path.join(_UpperCAmelCase , """inf_mem.csv""" ) ).exists() )
        self.assertTrue(Path(os.path.join(_UpperCAmelCase , """train_mem.csv""" ) ).exists() )
        self.assertTrue(Path(os.path.join(_UpperCAmelCase , """env.csv""" ) ).exists() )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
    """Run the benchmark with line-by-line memory tracing and assert the returned
    summaries expose the expected attributes and that the log file is created.

    NOTE(review): obfuscated — `MODEL_ID`, `_UpperCAmelCase`, `benchmark` and `result`
    are read but never bound under those names; verify against upstream.
    """
    _lowerCAmelCase : str = """sshleifer/tiny-gpt2"""
    def _check_summary_is_not_empty(_UpperCAmelCase : int ):
        # A memory summary must expose all four aggregate views.
        self.assertTrue(hasattr(_UpperCAmelCase , """sequential""" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , """cumulative""" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , """current""" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , """total""" ) )
    with tempfile.TemporaryDirectory() as tmp_dir:
        _lowerCAmelCase : Any = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_UpperCAmelCase , """log.txt""" ) , log_print=_UpperCAmelCase , trace_memory_line_by_line=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
        _lowerCAmelCase : str = PyTorchBenchmark(_UpperCAmelCase )
        _lowerCAmelCase : Any = benchmark.run()
        _check_summary_is_not_empty(result.inference_summary )
        _check_summary_is_not_empty(result.train_summary )
        self.assertTrue(Path(os.path.join(_UpperCAmelCase , """log.txt""" ) ).exists() )
| 196
| 1
|
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
# Hub checkpoint ids for the four Stable Diffusion v1.x variants compared by the
# pipeline class below.
# NOTE(review): obfuscation collapsed four distinct constant names onto `__A`, so only
# the last assignment survives at runtime — verify the original names before use.
__A = "CompVis/stable-diffusion-v1-1"
__A = "CompVis/stable-diffusion-v1-2"
__A = "CompVis/stable-diffusion-v1-3"
__A = "CompVis/stable-diffusion-v1-4"
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    """Comparison pipeline intended to load the four Stable Diffusion v1.x checkpoints,
    expose one text-to-image entry point per checkpoint, and a combined call that runs
    all four and returns the four images in one StableDiffusionPipelineOutput.

    NOTE(review): heavy obfuscation damage in this class —
    * ``super()._init_()`` looks like a typo for ``super().__init__()``;
    * ``__init__`` declares duplicate ``UpperCAmelCase_`` parameters (SyntaxError) and
      ``register_modules`` repeats the ``pipelinea`` keyword;
    * all five generation methods share the name ``SCREAMING_SNAKE_CASE_`` so later
      defs shadow earlier ones;
    * the combined call reads ``self.textaimg_sda_a`` / ``self.pipea`` and locals such
      as ``slice_size`` that are never bound under those names.
    Verify every detail against the original community comparison pipeline.
    """
    def __init__(self : Tuple , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : CLIPTextModel , UpperCAmelCase_ : CLIPTokenizer , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase_ : StableDiffusionSafetyChecker , UpperCAmelCase_ : CLIPImageProcessor , UpperCAmelCase_ : bool = True , ) ->Union[str, Any]:
        """Load three checkpoints from the Hub and assemble a fourth pipeline from the
        passed components, then register all four as submodules (see class NOTE)."""
        super()._init_()
        lowerCamelCase__: Tuple =StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
        lowerCamelCase__: List[str] =StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
        lowerCamelCase__: Optional[Any] =StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
        lowerCamelCase__: str =StableDiffusionPipeline(
            vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , requires_safety_checker=UpperCAmelCase_ , )
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea)
    @property
    def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Dict[str, Any]:
        """Return the registered (non-private) pipeline components as a dict.
        NOTE(review): `UpperCAmelCase_` is unbound here — presumably `k` upstream."""
        return {k: getattr(self , UpperCAmelCase_) for k in self.config.keys() if not k.startswith("_")}
    def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Optional[Union[str, int]] = "auto") ->Tuple:
        """Enable sliced attention; "auto" slices at half the attention head dim."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            lowerCamelCase__: Tuple =self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(UpperCAmelCase_)
    def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Optional[Any]:
        """Disable attention slicing (upstream passes None to the enable call)."""
        self.enable_attention_slicing(UpperCAmelCase_)
    # Text-to-image with a single checkpoint (obfuscated: all four wrappers share one name).
    @torch.no_grad()
    def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Optional[int] , ) ->Dict:
        """Delegate a full text-to-image request to the first sub-pipeline."""
        return self.pipea(
            prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
    @torch.no_grad()
    def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : int , ) ->int:
        """Delegate a full text-to-image request to the second sub-pipeline."""
        return self.pipea(
            prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
    @torch.no_grad()
    def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Dict , ) ->List[str]:
        """Delegate a full text-to-image request to the third sub-pipeline."""
        return self.pipea(
            prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
    @torch.no_grad()
    def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : List[str] , ) ->List[str]:
        """Delegate a full text-to-image request to the fourth sub-pipeline."""
        return self.pipea(
            prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
    @torch.no_grad()
    def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : List[Any] , ) ->List[str]:
        """Run the same request through all four checkpoints and return the four first
        images bundled in a single StableDiffusionPipelineOutput."""
        lowerCamelCase__: Dict ="cuda" if torch.cuda.is_available() else "cpu"
        self.to(UpperCAmelCase_)
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""")
        # Get first result from Stable Diffusion Checkpoint v1.1
        lowerCamelCase__: int =self.textaimg_sda_a(
            prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        lowerCamelCase__: Any =self.textaimg_sda_a(
            prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        lowerCamelCase__: Dict =self.textaimg_sda_a(
            prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        lowerCamelCase__: Tuple =self.textaimg_sda_a(
            prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]])
| 59
|
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
# Module logger plus the docstring constants consumed by the `add_code_sample_docstrings`
# decorators on the model classes below.
# NOTE(review): obfuscation collapsed the distinct constant names (_CONFIG_FOR_DOC,
# _CHECKPOINT_FOR_DOC, etc.) onto `lowercase__`, even though later code still reads the
# original names — verify against the upstream module.
lowercase__ : Optional[Any] = logging.get_logger(__name__)
# General docstring
lowercase__ : Optional[int] = "PoolFormerConfig"
# Base docstring
lowercase__ : Optional[Any] = "sail/poolformer_s12"
lowercase__ : Union[str, Any] = [1, 512, 7, 7]
# Image classification docstring
lowercase__ : List[str] = "sail/poolformer_s12"
lowercase__ : Dict = "tabby, tabby cat"
lowercase__ : Union[str, Any] = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowerCamelCase__ ( hidden_state , drop_prob = 0.0 , training = False ):
    """Per-sample stochastic depth ("drop path").

    During training, zeroes whole samples of `hidden_state` with probability
    `drop_prob` and rescales survivors by 1/keep_prob so the expected value is
    unchanged. In eval mode, or when drop_prob == 0, the input is returned as-is.

    Fix: the obfuscated original declared three parameters all named `_A` (a
    SyntaxError) while its body read `input`, `drop_prob` and `training`; the
    signature is restored so every reference in the body resolves. The positional
    order and defaults (0.0, False) are unchanged.

    Args:
        hidden_state (torch.Tensor): batch-first tensor of any rank.
        drop_prob (float): probability of dropping each sample, in [0, 1).
        training (bool): dropping is applied only when True.

    Returns:
        torch.Tensor: tensor with the same shape/dtype as `hidden_state`.
    """
    if drop_prob == 0.0 or not training:
        return hidden_state
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims
    # (works with diff dim tensors, not just 2D ConvNets).
    shape = (hidden_state.shape[0],) + (1,) * (hidden_state.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape , dtype=hidden_state.dtype , device=hidden_state.device )
    random_tensor.floor_()  # binarize to a 0/1 keep mask
    output = hidden_state.div(keep_prob) * random_tensor
    return output
class UpperCAmelCase ( nn.Module ):
    """Module wrapper around the file-level drop-path (stochastic depth) function,
    applying per-sample drop only in training mode.

    NOTE(review): obfuscated — `__init__`'s parameter is `__lowercase` but the body
    reads `drop_prob`, and the value lands in a throwaway local `snake_case_` rather
    than `self.drop_prob`; verify against the upstream PoolFormerDropPath.
    """
    def __init__( self : Optional[Any] , __lowercase : Optional[float] = None ):
        """Store the drop probability (see class NOTE about the unbound name)."""
        super().__init__()
        snake_case_ = drop_prob
    def snake_case__ ( self : List[str] , __lowercase : torch.Tensor ):
        """Apply stochastic depth, honoring `self.training`."""
        return drop_path(__lowercase , self.drop_prob , self.training )
    def snake_case__ ( self : Any ):
        """Extra repr shown in printed module trees (the drop probability)."""
        return "p={}".format(self.drop_prob )
class UpperCAmelCase ( nn.Module ):
    """Patch-embedding stage: a strided conv (obfuscated `nn.Convad`) projecting the
    input feature map into the stage's hidden size, followed by an optional norm.

    NOTE(review): obfuscated — `__init__` declares six parameters all named
    `__lowercase` (a SyntaxError) while the body reads `patch_size`, `stride`,
    `padding` and `norm_layer`, and results are bound to locals instead of
    `self.projection` / `self.norm`; verify against upstream PoolFormerEmbeddings.
    """
    def __init__( self : Optional[Any] , __lowercase : Dict , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : int , __lowercase : Union[str, Any] , __lowercase : Dict=None ):
        """Build the conv projection; scalar size arguments are normalized to 2-tuples."""
        super().__init__()
        snake_case_ = patch_size if isinstance(__lowercase , collections.abc.Iterable ) else (patch_size, patch_size)
        snake_case_ = stride if isinstance(__lowercase , collections.abc.Iterable ) else (stride, stride)
        snake_case_ = padding if isinstance(__lowercase , collections.abc.Iterable ) else (padding, padding)
        snake_case_ = nn.Convad(__lowercase , __lowercase , kernel_size=__lowercase , stride=__lowercase , padding=__lowercase )
        snake_case_ = norm_layer(__lowercase ) if norm_layer else nn.Identity()
    def snake_case__ ( self : Any , __lowercase : Optional[Any] ):
        """Project pixels/features to embeddings, then normalize."""
        snake_case_ = self.projection(__lowercase )
        snake_case_ = self.norm(__lowercase )
        return embeddings
class UpperCAmelCase ( nn.GroupNorm ):
    """GroupNorm with num_groups fixed to 1 — i.e. a LayerNorm over the channel
    dimension, which PoolFormer uses around its token mixer and MLP.

    NOTE(review): obfuscated — the positional parameter and the `**kwargs` share the
    name `__lowercase` (a SyntaxError); upstream this is (num_channels, **kwargs).
    """
    def __init__( self : int , __lowercase : List[str] , **__lowercase : Union[str, Any] ):
        """Forward to nn.GroupNorm with a single group."""
        super().__init__(1 , __lowercase , **__lowercase )
class UpperCAmelCase ( nn.Module ):
    """PoolFormer token mixer: stride-1 average pooling with 'same'-style padding,
    returning pool(x) - x so only the neighborhood average minus the identity flows
    into the surrounding residual connection.

    NOTE(review): obfuscated — `pool_size` and `hidden_states` are read but the
    parameters are named `__lowercase`; verify against upstream PoolFormerPooling.
    """
    def __init__( self : Dict , __lowercase : List[str] ):
        """Create the stride-1 average pool."""
        super().__init__()
        snake_case_ = nn.AvgPoolad(__lowercase , stride=1 , padding=pool_size // 2 , count_include_pad=__lowercase )
    def snake_case__ ( self : int , __lowercase : Tuple ):
        """Return pooled features with the identity subtracted."""
        return self.pool(__lowercase ) - hidden_states
class UpperCAmelCase ( nn.Module ):
    """PoolFormer MLP block implemented with 1x1 convolutions:
    conv1 -> activation -> dropout -> conv2 -> dropout.

    NOTE(review): obfuscated — `__init__` declares duplicate `__lowercase` parameters
    (SyntaxError), `config` is unbound, and layers are bound to locals rather than
    `self.conva` / `self.act_fn` / `self.drop`; verify against upstream PoolFormerOutput.
    """
    def __init__( self : str , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Optional[Any] ):
        """Build the two pointwise convs, the drop layer and the activation."""
        super().__init__()
        snake_case_ = nn.Convad(__lowercase , __lowercase , 1 )
        snake_case_ = nn.Convad(__lowercase , __lowercase , 1 )
        snake_case_ = PoolFormerDropPath(__lowercase )
        # Resolve a string activation name via the ACT2FN table, else keep the callable.
        if isinstance(config.hidden_act , __lowercase ):
            snake_case_ = ACTaFN[config.hidden_act]
        else:
            snake_case_ = config.hidden_act
    def snake_case__ ( self : Union[str, Any] , __lowercase : int ):
        """Apply conv -> act -> drop -> conv -> drop."""
        snake_case_ = self.conva(__lowercase )
        snake_case_ = self.act_fn(__lowercase )
        snake_case_ = self.drop(__lowercase )
        snake_case_ = self.conva(__lowercase )
        snake_case_ = self.drop(__lowercase )
        return hidden_states
class UpperCAmelCase ( nn.Module ):
    """One PoolFormer block: pooling token mixer + MLP sub-block, each wrapped in
    GroupNorm, drop path and a residual connection, with optional per-channel
    LayerScale when `config.use_layer_scale` is set.

    NOTE(review): obfuscated — duplicate `__lowercase` parameters (SyntaxError);
    `drop_path`, `config`, `hidden_states`, `pooling_output`, `layer_output`,
    `output` and `outputs` are read without being bound here, and results are
    assigned to locals instead of attributes; verify against upstream PoolFormerLayer.
    """
    def __init__( self : int , __lowercase : List[str] , __lowercase : Optional[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : int ):
        """Build mixer, MLP, the two norms, drop path, and LayerScale parameters."""
        super().__init__()
        snake_case_ = PoolFormerPooling(__lowercase )
        snake_case_ = PoolFormerOutput(__lowercase , __lowercase , __lowercase , __lowercase )
        snake_case_ = PoolFormerGroupNorm(__lowercase )
        snake_case_ = PoolFormerGroupNorm(__lowercase )
        # Useful for training neural nets
        snake_case_ = PoolFormerDropPath(__lowercase ) if drop_path > 0.0 else nn.Identity()
        snake_case_ = config.use_layer_scale
        if config.use_layer_scale:
            snake_case_ = nn.Parameter(
                config.layer_scale_init_value * torch.ones((__lowercase) ) , requires_grad=__lowercase )
            snake_case_ = nn.Parameter(
                config.layer_scale_init_value * torch.ones((__lowercase) ) , requires_grad=__lowercase )
    def snake_case__ ( self : Union[str, Any] , __lowercase : Union[str, Any] ):
        """Run the block; with LayerScale the mixer/MLP outputs are scaled per channel
        before being added back through the residual connections."""
        if self.use_layer_scale:
            snake_case_ = self.pooling(self.before_norm(__lowercase ) )
            snake_case_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            snake_case_ = hidden_states + self.drop_path(__lowercase )
            snake_case_ = ()
            snake_case_ = self.output(self.after_norm(__lowercase ) )
            snake_case_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            snake_case_ = hidden_states + self.drop_path(__lowercase )
            snake_case_ = (output,) + outputs
            return outputs
        else:
            snake_case_ = self.drop_path(self.pooling(self.before_norm(__lowercase ) ) )
            # First residual connection
            snake_case_ = pooling_output + hidden_states
            snake_case_ = ()
            # Second residual connection inside the PoolFormerOutput block
            snake_case_ = self.drop_path(self.output(self.after_norm(__lowercase ) ) )
            snake_case_ = hidden_states + layer_output
            snake_case_ = (output,) + outputs
            return outputs
class UpperCAmelCase ( nn.Module ):
    """Stack of PoolFormer stages: per-stage patch embeddings plus depth-many
    PoolFormerLayer blocks, with a linearly increasing stochastic-depth rate (`dpr`).

    NOTE(review): obfuscated — `embeddings`, `blocks`, `layers`, `dpr`, `cur` and the
    forward's `output_hidden_states` / `return_dict` names are read without being
    bound here, and forward declares duplicate `__lowercase` parameters (SyntaxError);
    verify against upstream PoolFormerEncoder.
    """
    def __init__( self : List[Any] , __lowercase : Optional[Any] ):
        """Build per-stage embeddings and the nested lists of blocks."""
        super().__init__()
        snake_case_ = config
        # stochastic depth decay rule
        snake_case_ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
        # patch embeddings
        snake_case_ = []
        for i in range(config.num_encoder_blocks ):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
        snake_case_ = nn.ModuleList(__lowercase )
        # Transformer blocks
        snake_case_ = []
        snake_case_ = 0
        for i in range(config.num_encoder_blocks ):
            # each block consists of layers
            snake_case_ = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i] ):
                layers.append(
                    PoolFormerLayer(
                        __lowercase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
            blocks.append(nn.ModuleList(__lowercase ) )
        snake_case_ = nn.ModuleList(__lowercase )
    def snake_case__ ( self : List[str] , __lowercase : List[Any] , __lowercase : int=False , __lowercase : Tuple=True ):
        """Run every stage in order, optionally collecting per-stage hidden states."""
        snake_case_ = () if output_hidden_states else None
        snake_case_ = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
            snake_case_ , snake_case_ = layers
            # Get patch embeddings from hidden_states
            snake_case_ = embedding_layer(__lowercase )
            # Send the embeddings through the blocks
            for _, blk in enumerate(__lowercase ):
                snake_case_ = blk(__lowercase )
                snake_case_ = layer_outputs[0]
            if output_hidden_states:
                snake_case_ = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=__lowercase , hidden_states=__lowercase )
class UpperCAmelCase ( UpperCAmelCase__ ):
    """Abstract base wiring PoolFormer into the HF PreTrainedModel machinery: config
    class, base-model prefix, main input name, gradient-checkpointing support, and
    the standard weight initialization.

    NOTE(review): obfuscated — `module` and `value` are read without being bound in
    the two methods below, and the second method has duplicate `__lowercase`
    parameters (SyntaxError); verify against the upstream PreTrainedModel subclass.
    """
    # HF plumbing: config type, checkpoint prefix, expected forward input, and
    # gradient-checkpointing capability flag.
    lowerCAmelCase_ = PoolFormerConfig
    lowerCAmelCase_ = '''poolformer'''
    lowerCAmelCase_ = '''pixel_values'''
    lowerCAmelCase_ = True
    def snake_case__ ( self : Dict , __lowercase : Optional[Any] ):
        """Initialize weights: normal_ for linear/conv weights, zeros for biases,
        ones/zeros for LayerNorm."""
        if isinstance(__lowercase , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(__lowercase , nn.LayerNorm ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
    def snake_case__ ( self : str , __lowercase : Any , __lowercase : Tuple=False ):
        """Toggle gradient checkpointing on encoder modules."""
        if isinstance(__lowercase , __lowercase ):
            snake_case_ = value
lowercase__ : Optional[int] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowercase__ : List[str] = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
    '''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , UpperCAmelCase__ , )
class UpperCAmelCase ( UpperCAmelCase__ ):
    """Bare PoolFormer encoder without a task head.

    NOTE(review): obfuscated — forward declares duplicate `__lowercase` parameters
    (SyntaxError) and reads `output_hidden_states`, `return_dict`, `pixel_values`,
    `encoder_outputs` and `sequence_output` without binding them; results are bound
    to locals instead of attributes in `__init__`; verify against upstream PoolFormerModel.
    """
    def __init__( self : Any , __lowercase : Any ):
        """Store the config, build the encoder and run the HF post-init hook."""
        super().__init__(__lowercase )
        snake_case_ = config
        snake_case_ = PoolFormerEncoder(__lowercase )
        # Initialize weights and apply final processing
        self.post_init()
    def snake_case__ ( self : List[Any] ):
        """Return the patch-embedding modules (HF input-embeddings accessor)."""
        return self.embeddings.patch_embeddings
    @add_start_docstrings_to_model_forward(__lowercase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def snake_case__ ( self : Optional[Any] , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[bool] = None , ):
        """Encode `pixel_values`; returns a BaseModelOutputWithNoAttention or tuple."""
        snake_case_ = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values" )
        snake_case_ = self.encoder(
            __lowercase , output_hidden_states=__lowercase , return_dict=__lowercase , )
        snake_case_ = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=__lowercase , hidden_states=encoder_outputs.hidden_states , )
class UpperCAmelCase ( nn.Module ):
    """Final linear pooler projecting hidden_size -> hidden_size.

    NOTE(review): obfuscated — `config` and `output` are read without being bound;
    the layer is assigned to a local rather than `self.dense`; verify upstream.
    """
    def __init__( self : Dict , __lowercase : Tuple ):
        """Create the dense projection."""
        super().__init__()
        snake_case_ = nn.Linear(config.hidden_size , config.hidden_size )
    def snake_case__ ( self : Dict , __lowercase : List[str] ):
        """Apply the dense layer to the hidden states."""
        snake_case_ = self.dense(__lowercase )
        return output
@add_start_docstrings(
    '''
    PoolFormer Model transformer with an image classification head on top
    ''' , UpperCAmelCase__ , )
class UpperCAmelCase ( UpperCAmelCase__ ):
    """PoolFormer with a classification head: backbone -> GroupNorm -> global mean
    pool over the two spatial dims -> linear classifier; computes regression /
    single-label / multi-label loss depending on `config.problem_type`.

    NOTE(review): obfuscated — forward declares duplicate `__lowercase` parameters
    (SyntaxError) and many names (`config`, `return_dict`, `outputs`, `logits`,
    `labels`, `loss`, `loss_fct`, `output`) are read without being bound; verify
    against upstream PoolFormerForImageClassification.
    """
    def __init__( self : Optional[int] , __lowercase : int ):
        """Build backbone, final norm, and the (possibly Identity) classifier head."""
        super().__init__(__lowercase )
        snake_case_ = config.num_labels
        snake_case_ = PoolFormerModel(__lowercase )
        # Final norm
        snake_case_ = PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        snake_case_ = (
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(__lowercase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def snake_case__ ( self : str , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[torch.LongTensor] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[bool] = None , ):
        """Classify `pixel_values`; when `labels` is given, also compute the loss whose
        form is inferred from num_labels / label dtype if `problem_type` is unset."""
        snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict
        snake_case_ = self.poolformer(
            __lowercase , output_hidden_states=__lowercase , return_dict=__lowercase , )
        snake_case_ = outputs[0]
        # Normalize, then average over the two spatial dims before the classifier.
        snake_case_ = self.classifier(self.norm(__lowercase ).mean([-2, -1] ) )
        snake_case_ = None
        if labels is not None:
            # Infer problem_type once, mirroring other HF classification heads.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    snake_case_ = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    snake_case_ = "single_label_classification"
                else:
                    snake_case_ = "multi_label_classification"
            if self.config.problem_type == "regression":
                snake_case_ = MSELoss()
                if self.num_labels == 1:
                    snake_case_ = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    snake_case_ = loss_fct(__lowercase , __lowercase )
            elif self.config.problem_type == "single_label_classification":
                snake_case_ = CrossEntropyLoss()
                snake_case_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                snake_case_ = BCEWithLogitsLoss()
                snake_case_ = loss_fct(__lowercase , __lowercase )
        if not return_dict:
            snake_case_ = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=__lowercase , logits=__lowercase , hidden_states=outputs.hidden_states )
| 376
| 0
|
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
# Module-level logger used by all the loading helpers below.
logger = logging.get_logger(__name__)

# Maps fairseq wav2vec2 state-dict key fragments to their HF counterparts;
# "*" is replaced with the encoder layer index at load time.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the top level of the HF model rather than under a prefix.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign tensor `value` into the HF module attribute addressed by dotted `key`.

    `weight_type` selects which parameter of the resolved module receives the
    data ("weight", "weight_g", "weight_v", "bias", or None for the module's
    own `.data`). `full_name` is only used for logging / error messages.
    """
    # walk the dotted path down to the target module/parameter
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    # copy into the matching parameter's .data so gradients/hooks are untouched
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy every weight of the fairseq wav2vec2 encoder into the HF `hf_model`.

    Conv feature-extractor weights and adapter weights are dispatched to the
    dedicated helpers; everything else is mapped through `MAPPING`. Keys that
    match nothing are collected and reported in a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.']):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # recover the encoder layer index from the fairseq key
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one fairseq conv-feature-extractor tensor into the HF feature extractor.

    fairseq names look like ``conv_layers.<layer_id>.<type_id>.<param>`` where
    type_id 0 is the conv itself and type_id 2 the (group/layer) norm. Tensors
    that match neither are appended to `unused_weights`.
    """
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # group norm exists only on layer 0; layer norm on every layer
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    """Load one fairseq adapter tensor into the HF adapter module.

    Non-``adaptor`` keys are the projection / projection-layer-norm pair;
    ``adaptor.<layer_id>...`` keys go into the conv adapter layers. Anything
    that matches neither is appended to `unused_weights`.
    """
    name = full_name.split('adaptor.')[-1]
    items = name.split('.')
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
                adapter.proj_layer_norm.bias.data = value
                logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
                adapter.proj.bias.data = value
                logger.info(f'Adapter proj layer bias was initialized from {full_name}.')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
                adapter.proj.weight.data = value
                logger.info(f'Adapter proj layer weight was initialized from {full_name}.')
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.')
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` that shares its weight with embedding `emb`.

    The returned layer's weight tensor is `emb.weight.data` itself (shared
    storage, not a copy), the usual trick for tying an LM head to the
    token-embedding matrix.
    """
    vocab_size, emb_size = emb.weight.shape
    # bias must be False: the tied head has no bias term
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Convert a fairseq wav2vec2 + mBART speech-seq2seq checkpoint to HF format.

    Loads the fairseq ensemble, copies the encoder weights into a
    ``WavaVecaModel``, loads the decoder state dict into ``MBartForCausalLM``,
    wires both into a ``SpeechEncoderDecoderModel``, fixes the special-token
    ids in the config, and saves model + tokenizer + feature extractor to
    `pytorch_dump_folder_path`.
    """
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            'config_yaml': config_yaml_path,
            'data': '/'.join(dict_path.split('/')[:-1]),
            'w2v_path': checkpoint_path,
            'load_pretrained_decoder_from': None,
        },
    )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}')
    logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}')
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    # embeddings are not tied between encoder and decoder here
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    # fix up the merged config's special-token bookkeeping
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'mbart50'
    config['feature_extractor_type'] = 'wav2vec2'
    config['decoder_start_token_id'] = tokenizer.eos_token_id
    config['forced_bos_token_id'] = 250_004
    config['forced_eos_token_id'] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI wrapper: collect paths/hyper-parameters and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-xls-r-1b",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/mbart-large-50-one-to-many-mmt",
        type=str,
        help="Path to hf decoder checkpoint config",
    )
    parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
    parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
    parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
    parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
    parser.add_argument("--start_token_id", default=25_0004, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        decoder_start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
| 542
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # Fallback stub so the module imports even when PIL/vision extras are absent.
    class __UpperCAmelCase :
        """Minimal stand-in for ``PIL.Image`` used when vision deps are missing."""

        @staticmethod
        def lowerCamelCase ( *lowerCAmelCase_ , **lowerCAmelCase_ ):
            """No-op placeholder; accepts any arguments and does nothing."""
            pass
@is_pipeline_test
@require_vision
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
    """Pipeline tests for the zero-shot object-detection task (PyTorch only).

    NOTE(review): the obfuscated original gave every method the same name (so
    they shadowed each other and were never discovered) and used duplicate
    parameter names (a SyntaxError); names are restored per the upstream test
    file.
    """

    # consumed by the shared pipeline-test mixin to pick candidate models
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Return a tiny pipeline plus one example for the generic pipeline tests."""
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection'
        )
        examples = [
            {
                'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
                'candidate_labels': ['cat', 'remote', 'couch'],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        """Smoke test: detections exist and each one has the expected schema."""
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        """Tiny model: exact outputs for single input and for a batched input."""
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection'
        )
        outputs = object_detector(
            './tests/fixtures/tests_samples/COCO/000000039769.png',
            candidate_labels=['cat', 'remote', 'couch'],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.7235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                {'score': 0.7218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                {'score': 0.7184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                {'score': 0.6748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                {'score': 0.6656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                {'score': 0.6614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                {'score': 0.6456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
                {'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
                {'score': 0.6419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
            ],
        )
        outputs = object_detector(
            [
                {
                    'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
                    'candidate_labels': ['cat', 'remote', 'couch'],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {'score': 0.7235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                    {'score': 0.7218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                    {'score': 0.7184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                    {'score': 0.6748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                    {'score': 0.6656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                    {'score': 0.6614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                    {'score': 0.6456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
                    {'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
                    {'score': 0.6419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        """Default (large) model: exact outputs for single and batched inputs."""
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            candidate_labels=['cat', 'remote', 'couch'],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
                {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
                {'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
                {'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
                {'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
            ],
        )
        outputs = object_detector(
            [
                {
                    'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
                    'candidate_labels': ['cat', 'remote', 'couch'],
                },
                {
                    'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
                    'candidate_labels': ['cat', 'remote', 'couch'],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
                    {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
                    {'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
                    {'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
                    {'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
                ],
                [
                    {'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
                    {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
                    {'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
                    {'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
                    {'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        """Raising the threshold must drop the low-score detections."""
        threshold = 0.2
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            candidate_labels=['cat', 'remote', 'couch'],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
                {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
                {'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        """``top_k`` must cap the number of returned detections."""
        top_k = 2
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            candidate_labels=['cat', 'remote', 'couch'],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
                {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
            ],
        )
| 542
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def setUp(self):
    """Write a toy vocab + feature-extractor config to a temp dir for the tests.

    NOTE(review): the obfuscated original dropped every ``self.*`` assignment
    (tmpdirname, vocab_file, feature_extraction_file, add_kwargs_tokens_map,
    decoder_name), which later methods read; restored here. Must be named
    ``setUp`` for unittest to run it.
    """
    vocab = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
    vocab_tokens = dict(zip(vocab, range(len(vocab))))
    self.add_kwargs_tokens_map = {
        'unk_token': '<unk>',
        'bos_token': '<s>',
        'eos_token': '</s>',
    }
    feature_extractor_map = {
        'feature_size': 1,
        'padding_value': 0.0,
        'sampling_rate': 16_000,
        'return_attention_mask': False,
        'do_normalize': True,
    }
    self.tmpdirname = tempfile.mkdtemp()
    self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
    self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
    with open(self.vocab_file, 'w', encoding='utf-8') as fp:
        fp.write(json.dumps(vocab_tokens) + '\n')
    with open(self.feature_extraction_file, 'w', encoding='utf-8') as fp:
        fp.write(json.dumps(feature_extractor_map) + '\n')
    # load decoder from hub
    self.decoder_name = 'hf-internal-testing/ngram-beam-search-decoder'
def get_tokenizer(self, **SCREAMING_SNAKE_CASE_):
    """Load a CTC tokenizer from the temp dir, merging the default special tokens.

    Named ``get_tokenizer`` because that is how the test methods call it; the
    merged kwargs (defaults overridden by caller kwargs) are what must be
    forwarded to ``from_pretrained``.
    """
    kwargs = self.add_kwargs_tokens_map.copy()
    kwargs.update(SCREAMING_SNAKE_CASE_)
    return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_feature_extractor(self, **SCREAMING_SNAKE_CASE_):
    """Load the feature extractor saved to the temp dir by ``setUp``."""
    return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **SCREAMING_SNAKE_CASE_)
def get_decoder(self, **SCREAMING_SNAKE_CASE_):
    """Load the beam-search CTC decoder from the hub (``self.decoder_name``)."""
    return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **SCREAMING_SNAKE_CASE_)
def tearDown(self):
    """Remove the temporary directory created in ``setUp``.

    Must be named ``tearDown`` for unittest to invoke it after each test.
    """
    shutil.rmtree(self.tmpdirname)
def test_save_load_pretrained_default(self):
    """Round-trip save/load preserves tokenizer, feature extractor and decoder."""
    tokenizer = self.get_tokenizer()
    feature_extractor = self.get_feature_extractor()
    decoder = self.get_decoder()
    processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
    processor.save_pretrained(self.tmpdirname)
    processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)
    # tokenizer
    self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    self.assertIsInstance(processor.tokenizer, WavaVecaCTCTokenizer)
    # feature extractor
    self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
    self.assertIsInstance(processor.feature_extractor, WavaVecaFeatureExtractor)
    # decoder
    self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
    self.assertEqual(
        processor.decoder.model_container[decoder._model_key]._unigram_set,
        decoder.model_container[decoder._model_key]._unigram_set,
    )
    self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
def test_save_load_pretrained_additional_features(self):
    """Extra LM kwargs passed to ``from_pretrained`` must reach the language model."""
    processor = WavaVecaProcessorWithLM(
        tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
    )
    processor.save_pretrained(self.tmpdirname)
    # make sure that error is thrown when decoder alphabet doesn't match
    processor = WavaVecaProcessorWithLM.from_pretrained(
        self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
    )
    # decoder
    self.assertEqual(processor.language_model.alpha, 5.0)
    self.assertEqual(processor.language_model.beta, 3.0)
    self.assertEqual(processor.language_model.score_boundary, -7.0)
    self.assertEqual(processor.language_model.unk_score_offset, 3)
def test_load_decoder_tokenizer_mismatch_content(self):
    """A tokenizer with tokens unknown to the decoder alphabet must raise."""
    tokenizer = self.get_tokenizer()
    # add token to trigger raise
    tokenizer.add_tokens(['xx'])
    with self.assertRaisesRegex(ValueError, 'include'):
        WavaVecaProcessorWithLM(
            tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
def test_feature_extractor(self):
    """Processor audio path must match the bare feature extractor's output."""
    feature_extractor = self.get_feature_extractor()
    tokenizer = self.get_tokenizer()
    decoder = self.get_decoder()
    processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
    raw_speech = floats_list((3, 1000))
    input_feat_extract = feature_extractor(raw_speech, return_tensors='np')
    input_processor = processor(raw_speech, return_tensors='np')
    for key in input_feat_extract.keys():
        self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def test_tokenizer(self):
    """Processor text path must match the bare tokenizer's output."""
    feature_extractor = self.get_feature_extractor()
    tokenizer = self.get_tokenizer()
    decoder = self.get_decoder()
    processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
    input_str = 'This is a test string'
    encoded_processor = processor(text=input_str)
    encoded_tok = tokenizer(input_str)
    for key in encoded_tok.keys():
        self.assertListEqual(encoded_tok[key], encoded_processor[key])
def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
    """Return deterministic random logits of `shape`, seeded with `seed`.

    The obfuscated original used the same name for both parameters (a
    SyntaxError); callers invoke ``self._get_dummy_logits(shape=..., seed=...)``.
    """
    np.random.seed(seed)
    return np.random.rand(*shape)
def test_decoder(self):
    """``processor.decode`` must agree with the raw pyctcdecode beam output."""
    feature_extractor = self.get_feature_extractor()
    tokenizer = self.get_tokenizer()
    decoder = self.get_decoder()
    processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
    logits = self._get_dummy_logits(shape=(10, 16), seed=13)
    decoded_processor = processor.decode(logits)
    decoded_decoder = decoder.decode_beams(logits)[0]
    self.assertEqual(decoded_decoder[0], decoded_processor.text)
    self.assertEqual('</s> <s> </s>', decoded_processor.text)
    self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
    self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
@parameterized.expand([[None], ['fork'], ['spawn']] )
def test_decoder_batch(self, pool_context):
    """``batch_decode`` must match pyctcdecode for every multiprocessing context."""
    feature_extractor = self.get_feature_extractor()
    tokenizer = self.get_tokenizer()
    decoder = self.get_decoder()
    processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
    logits = self._get_dummy_logits()
    # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
    # otherwise, the LM won't be available to the pool's sub-processes.
    # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
    if pool_context is None:
        decoded_processor = processor.batch_decode(logits)
    else:
        with get_context(pool_context).Pool() as pool:
            decoded_processor = processor.batch_decode(logits, pool)
    logits_list = list(logits)
    with get_context('fork').Pool() as p:
        decoded_beams = decoder.decode_beams_batch(p, logits_list)
    texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
    for beams in decoded_beams:
        texts_decoder.append(beams[0][0])
        logit_scores_decoder.append(beams[0][-2])
        lm_scores_decoder.append(beams[0][-1])
    self.assertListEqual(texts_decoder, decoded_processor.text)
    self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'], decoded_processor.text)
    self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
    self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
def test_decoder_with_params(self):
    """Beam-search kwargs must be forwarded to pyctcdecode and change the result."""
    feature_extractor = self.get_feature_extractor()
    tokenizer = self.get_tokenizer()
    decoder = self.get_decoder()
    processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
    logits = self._get_dummy_logits()
    beam_width = 15
    beam_prune_logp = -20.0
    token_min_logp = -4.0
    decoded_processor_out = processor.batch_decode(
        logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp,
    )
    decoded_processor = decoded_processor_out.text
    logits_list = list(logits)
    with get_context('fork').Pool() as pool:
        decoded_decoder_out = decoder.decode_beams_batch(
            pool, logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp,
        )
    decoded_decoder = [d[0][0] for d in decoded_decoder_out]
    logit_scores = [d[0][2] for d in decoded_decoder_out]
    lm_scores = [d[0][3] for d in decoded_decoder_out]
    self.assertListEqual(decoded_processor, decoded_decoder)
    self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'], decoded_processor)
    self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
    self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1E-3))
    self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
    self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1E-3))
def test_decoder_with_params_of_lm(self):
    """LM kwargs must be forwarded and persisted on the decoder's language model."""
    feature_extractor = self.get_feature_extractor()
    tokenizer = self.get_tokenizer()
    decoder = self.get_decoder()
    processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
    logits = self._get_dummy_logits()
    alpha = 2.0
    beta = 5.0
    unk_score_offset = -20.0
    lm_score_boundary = True
    decoded_processor_out = processor.batch_decode(
        logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary,
    )
    decoded_processor = decoded_processor_out.text
    logits_list = list(logits)
    decoder.reset_params(
        alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary,
    )
    with get_context('fork').Pool() as pool:
        decoded_decoder_out = decoder.decode_beams_batch(
            pool, logits_list,
        )
    decoded_decoder = [d[0][0] for d in decoded_decoder_out]
    self.assertListEqual(decoded_processor, decoded_decoder)
    self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'], decoded_processor)
    lm_model = processor.decoder.model_container[processor.decoder._model_key]
    self.assertEqual(lm_model.alpha, 2.0)
    self.assertEqual(lm_model.beta, 5.0)
    self.assertEqual(lm_model.unk_score_offset, -20.0)
    self.assertEqual(lm_model.score_boundary, True)
def test_decoder_download_ignores_files(self):
    """Only decoder-relevant files should be fetched from the hub repo."""
    processor = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
    language_model = processor.decoder.model_container[processor.decoder._model_key]
    path_to_cached_dir = Path(language_model._kenlm_model.path.decode('utf-8')).parent.parent.absolute()
    downloaded_decoder_files = os.listdir(path_to_cached_dir)
    expected_decoder_files = ['alphabet.json', 'language_model']
    downloaded_decoder_files.sort()
    expected_decoder_files.sort()
    # test that only decoder relevant files from
    # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
    # are downloaded and none of the rest (e.g. README.md, ...)
    self.assertListEqual(downloaded_decoder_files, expected_decoder_files)
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : Union[str, Any] = snapshot_download('hf-internal-testing/processor_with_lm' )
__lowerCamelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = processor.decoder.model_container[processor.decoder._model_key]
__lowerCamelCase : int = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__lowerCamelCase : str = os.listdir(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> str:
__lowerCamelCase : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowerCamelCase : Dict = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowerCamelCase : Tuple = floats_list((3, 10_00) )
__lowerCamelCase : Optional[Any] = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__lowerCamelCase : Optional[int] = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowerCamelCase : int = self._get_dummy_logits()
__lowerCamelCase : Tuple = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[Any] = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowercase_ ( self ) -> int:
__lowerCamelCase : Optional[int] = self.get_feature_extractor()
__lowerCamelCase : List[str] = self.get_tokenizer()
__lowerCamelCase : List[Any] = self.get_decoder()
__lowerCamelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def lowercase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
__lowerCamelCase : int = [d[key] for d in offsets]
return retrieved_list
    def lowercase_ ( self ) -> Tuple:
        # NOTE(review): locals in this method were mangled — every assignment
        # targets `__lowerCamelCase` while later lines read `processor` and
        # `outputs`, so the test cannot run as written.  The positional flags
        # passed as SCREAMING_SNAKE_CASE_ were presumably booleans (e.g.
        # `output_word_offsets=True`) and the isinstance target a decoder
        # output class — confirm against the original test before fixing.
        __lowerCamelCase : str = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        __lowerCamelCase : Optional[Any] = self._get_dummy_logits()[0]
        __lowerCamelCase : Dict = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue('text' in outputs )
        self.assertTrue('word_offsets' in outputs )
        self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
        # Words re-joined from the offsets must reproduce the decoded text.
        self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
    def lowercase_ ( self ) -> int:
        # NOTE(review): locals mangled to `__lowerCamelCase`; later reads show
        # they were `processor`, `logits` and `outputs`.  The flags passed as
        # SCREAMING_SNAKE_CASE_ were presumably `output_word_offsets=True`;
        # restore the names before this test can run.
        __lowerCamelCase : Tuple = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        __lowerCamelCase : Union[str, Any] = self._get_dummy_logits()
        __lowerCamelCase : Tuple = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue('text' in outputs )
        self.assertTrue('word_offsets' in outputs )
        self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
        # Per-sample words re-joined from the offsets must reproduce each text.
        self.assertListEqual(
            [' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
    @slow
    @require_torch
    @require_torchaudio
    def lowercase_ ( self ) -> Union[str, Any]:
        # Integration test: decode a real common_voice sample with an LM-backed
        # processor and check words and word-level timestamps.
        # NOTE(review): locals were mangled to `__lowerCamelCase` — later reads
        # show they were `ds`, `sample`, `processor`, `model`, `logits`,
        # `output`, `time_offset`, `word_time_stamps`, `EXPECTED_TEXT`,
        # `start_times`, `end_times` and the two expected-time tensors; the
        # flags passed as SCREAMING_SNAKE_CASE_ were presumably
        # `streaming=True` / `output_word_offsets=True`.  Restore before running.
        import torch
        __lowerCamelCase : Optional[Any] = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Union[str, Any] = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_60_00 ) )
        __lowerCamelCase : List[str] = iter(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : List[Any] = next(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Union[str, Any] = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
        __lowerCamelCase : Tuple = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        __lowerCamelCase : List[Any] = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
        with torch.no_grad():
            __lowerCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
        __lowerCamelCase : Optional[int] = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
        # Frame offsets are converted to seconds via the model's logits ratio.
        __lowerCamelCase : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        __lowerCamelCase : Optional[int] = [
            {
                'start_time': d['start_offset'] * time_offset,
                'end_time': d['end_offset'] * time_offset,
                'word': d['word'],
            }
            for d in output['word_offsets']
        ]
        __lowerCamelCase : Optional[Any] = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
        # output words
        self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
        # output times
        __lowerCamelCase : str = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
        __lowerCamelCase : Tuple = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
        # fmt: off
        __lowerCamelCase : Any = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
        __lowerCamelCase : str = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
        # fmt: on
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
| 13
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
# Lazily-populated cache of formats Pillow can both open and save; filled in by
# list_image_compression_formats() below (names restored from the `global` and
# read sites later in the file — the originals were all mangled to `__a`).
_IMAGE_COMPRESSION_FORMATS = None
# Byteorder character of the running interpreter, used to resolve '=' dtypes.
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype('|b1'),
    np.dtype('|u1'),
    np.dtype('<u2'),
    np.dtype('>u2'),
    np.dtype('<i2'),
    np.dtype('>i2'),
    np.dtype('<u4'),
    np.dtype('>u4'),
    np.dtype('<i4'),
    np.dtype('>i4'),
    np.dtype('<f4'),
    np.dtype('>f4'),
    np.dtype('<f8'),
    np.dtype('>f8'),
]
@dataclass
class __a:
    """Image feature: encodes images to `{'bytes', 'path'}` structs for Arrow
    storage and decodes them back to `PIL.Image.Image` objects.

    NOTE(review): this class was run through a name-mangling pass — the five
    class attributes all assign to `lowerCAmelCase` (presumably `decode`,
    `id`, `dtype`, `pa_type`, `_type`), the methods all share the name `a__`
    (so only the last definition survives), and many locals are assigned to
    `UpperCAmelCase_` while later lines read the original names (`value`,
    `path`, `bytes_`, `image`, `bytes_array`, `path_array`, ...).  The code is
    kept byte-identical here; restore the names before it can run.
    """
    lowerCAmelCase = True
    lowerCAmelCase = None
    # Automatically constructed
    lowerCAmelCase = "PIL.Image.Image"
    lowerCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
    # NOTE(review): `init=_a , repr=_a` reads an undefined name `_a` —
    # presumably `init=False, repr=False` originally.
    lowerCAmelCase = field(default='''Image''' , init=_a , repr=_a )
    def __call__( self ) -> Tuple:
        """Return the Arrow storage type of this feature."""
        return self.pa_type
    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> dict:
        """Encode one example (path str, raw bytes, ndarray, PIL image or
        `{'bytes', 'path'}` dict) into a `{'bytes', 'path'}` dict."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
        if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
            UpperCAmelCase_ : List[str] = np.array(_SCREAMING_SNAKE_CASE )
        if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
            return {"path": value, "bytes": None}
        elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
            return {"path": None, "bytes": value}
        elif isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(_SCREAMING_SNAKE_CASE )
        elif isinstance(_SCREAMING_SNAKE_CASE ,PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(_SCREAMING_SNAKE_CASE )
        elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('''path''' )}
        elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
        else:
            raise ValueError(
                f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image":
        """Decode a `{'bytes', 'path'}` dict back into a PIL image.

        The second argument is presumably `token_per_repo_id` (read below):
        a mapping of hub repo ids to auth tokens for remote reads.
        """
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
        if token_per_repo_id is None:
            UpperCAmelCase_ : Dict = {}
        UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = value['''path'''], value['''bytes''']
        if bytes_ is None:
            if path is None:
                raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(_SCREAMING_SNAKE_CASE ):
                    UpperCAmelCase_ : Tuple = PIL.Image.open(_SCREAMING_SNAKE_CASE )
                else:
                    UpperCAmelCase_ : Dict = path.split('''::''' )[-1]
                    try:
                        # Hub URLs embed the repo id; use it to pick the right auth token.
                        UpperCAmelCase_ : Optional[int] = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id''']
                        UpperCAmelCase_ : Tuple = token_per_repo_id.get(_SCREAMING_SNAKE_CASE )
                    except ValueError:
                        UpperCAmelCase_ : Optional[Any] = None
                    with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
                        UpperCAmelCase_ : List[str] = BytesIO(f.read() )
                    UpperCAmelCase_ : Optional[Any] = PIL.Image.open(bytes_ )
        else:
            UpperCAmelCase_ : List[Any] = PIL.Image.open(BytesIO(bytes_ ) )
        image.load() # to avoid "Too many open files" errors
        return image
    def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten: when decoding is disabled, expose the raw bytes/path columns."""
        from .features import Value
        return (
            self
            if self.decode
            else {
                "bytes": Value('''binary''' ),
                "path": Value('''string''' ),
            }
        )
    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
        """Cast string / binary / struct / list Arrow storage into this
        feature's `{'bytes', 'path'}` struct layout."""
        if pa.types.is_string(storage.type ):
            UpperCAmelCase_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
            UpperCAmelCase_ : Dict = pa.StructArray.from_arrays([bytes_array, storage] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
            UpperCAmelCase_ : Tuple = pa.StructArray.from_arrays([storage, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index('''bytes''' ) >= 0:
                UpperCAmelCase_ : Dict = storage.field('''bytes''' )
            else:
                UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
            if storage.type.get_field_index('''path''' ) >= 0:
                UpperCAmelCase_ : int = storage.field('''path''' )
            else:
                UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
            UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            # Lists are treated as raw image arrays and encoded to bytes.
            UpperCAmelCase_ : Optional[Any] = pa.array(
                [encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
            UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
            UpperCAmelCase_ : Dict = pa.StructArray.from_arrays(
                [bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
        return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
        """Embed external image files into the storage: read each path's bytes
        and keep only the basename as the path."""
        @no_op_if_value_is_null
        def path_to_bytes(_SCREAMING_SNAKE_CASE ):
            with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ) as f:
                UpperCAmelCase_ : Any = f.read()
            return bytes_
        UpperCAmelCase_ : Union[str, Any] = pa.array(
            [
                (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ] ,type=pa.binary() ,)
        UpperCAmelCase_ : List[str] = pa.array(
            [os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] ,type=pa.string() ,)
        UpperCAmelCase_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
        return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def list_image_compression_formats():
    """Return image formats Pillow can both open and save, caching the result.

    Renamed from the mangled `lowerCamelCase__`: it is called as
    `list_image_compression_formats()` later in the file.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        # Image.init() registers all format plugins before we intersect them.
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes( image ):
    """Serialize a PIL image to bytes, keeping its own format when compressible
    and falling back to PNG (standard modes) or TIFF otherwise.

    Renamed from the mangled `lowerCamelCase__` / `_lowercase`: the body reads
    `image` and `buffer`, and callers use `image_to_bytes`.
    """
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        image_format = image.format
    else:
        image_format = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(buffer , format=image_format )
    return buffer.getvalue()
def encode_pil_image( image ):
    """Encode a PIL image as `{'path', 'bytes'}`, keeping only the path when the
    image was loaded from a named file (avoids duplicating local data)."""
    if hasattr(image , 'filename' ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array( array ):
    """Encode a numpy array as image bytes, downcasting to a dtype Pillow can
    round-trip.  Locals restored from the mangled `UpperCAmelCase_` targets
    using the names read later in the body."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1' )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts( objs ):
    """Encode a list of image-like objects (path strings, numpy arrays or PIL
    images) into `{'path', 'bytes'}` dicts, passing `None` entries through.

    Encoder choice is grounded by the isinstance branches: np.ndarray inputs
    go through `encode_np_array`, PIL images through `encode_pil_image`.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        # first_non_null_value returns (index, value); only the value is needed.
        _, obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
| 30
| 0
|
def power(base: int , exponent: int ):
    """Return base**exponent for non-negative integer exponents, recursively.

    power(b, 0) == 1 for any b.  The function was defined under a mangled
    name with duplicate parameter names, but the script below calls it as
    `power(base, abs(exponent))` and the body reads `base`/`exponent`.
    """
    return base * power(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
    print('Raise base to the power of exponent using recursion...')
    # NOTE(review): the assignment targets below were mangled to
    # `_UpperCamelCase`; the reads on the following lines show the original
    # names were `base`, `exponent` and `result` — as written every
    # assignment is dead and the reads fail.
    _UpperCamelCase : Optional[int] = int(input('Enter the base: ').strip())
    _UpperCamelCase : List[Any] = int(input('Enter the exponent: ').strip())
    _UpperCamelCase : List[str] = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        _UpperCamelCase : Optional[int] = 1 / result
    print(F'''{base} to the power of {exponent} is {result}''')
| 715
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): the two names below were mangled to `_UpperCamelCase`;
# conventionally they are `logger` and `device`, but neither is referenced in
# the visible portion of the file — confirm before renaming.
_UpperCamelCase : Tuple = logging.get_logger(__name__)
_UpperCamelCase : Dict = torch.device('cpu')
def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions.

    Renamed from the mangled `_SCREAMING_SNAKE_CASE`: the conversion function
    below calls `prepare_img()`, and the body's `return im` fixes the local.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output(swiftformer_name: str ):
    """Return the first five reference logits of the original checkpoint.

    Renamed from the mangled `_SCREAMING_SNAKE_CASE`: it is called as
    `get_expected_output(...)` below.  Raises ValueError for unknown names
    instead of silently returning None.
    """
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] )
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] )
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] )
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] )
    raise ValueError(f"Unknown SwiftFormer model: {swiftformer_name}" )
def rename_key(dct , old , new ):
    """Move the value stored under key `old` to key `new`, in place.

    Renamed from the mangled `_SCREAMING_SNAKE_CASE`: called as
    `rename_key(state_dict, src, dest)` in the conversion loop below.
    """
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict ):
    """Build (old_key, new_key) pairs mapping original SwiftFormer weight names
    to their Hugging Face equivalents.

    Renamed from the mangled `_SCREAMING_SNAKE_CASE`; the locals `k_new`, `ls`
    and `rename_keys` are restored from the reads in the body.
    """
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv' , '.point_wise_conv' )
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv' , '.depth_wise_conv' )
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.' , '.proj.' )
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
        if "network" in k_new:
            ls = k_new.split('.' )
            # Numeric third component marks a block inside an encoder stage.
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
            else:
                k_new = k_new.replace('network' , 'swiftformer.encoder.network' )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name , pytorch_dump_folder_path , original_ckpt ):
    """Convert an original SwiftFormer checkpoint to HF format and verify logits.

    Args:
        swiftformer_name: one of swiftformer_{xs,s,l1,l3}.
        pytorch_dump_folder_path: output directory for the converted model.
        original_ckpt: local path or https URL of the original state dict.

    Renamed from the mangled `_SCREAMING_SNAKE_CASE` (it is invoked as
    `convert_swiftformer_checkpoint` in the __main__ block).  Locals and
    config attribute targets are restored from the reads in the body;
    the exact SwiftFormerConfig attribute names (`num_labels`, `depths`,
    `embed_dims`) should be confirmed against the transformers source.
    """
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 10_00
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 1_12, 2_20]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 1_68, 2_24]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 1_92, 3_84]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 1_28, 3_20, 5_12]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https' ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location='cpu' , check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt , map_location='cpu' )
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config' )
    inputs = processor(images=image , return_tensors='pt' )
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs['pixel_values'] ).logits
    assert hf_logits.shape == torch.Size([1, 10_00] )
    assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'Saving model {swiftformer_name} to {pytorch_dump_folder_path}' )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry: parse checkpoint-conversion arguments and run the converter.
    _UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--swiftformer_name',
        default='swiftformer_xs',
        choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
        type=str,
        help='Name of the SwiftFormer model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='./converted_outputs/',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    # NOTE(review): the two assignment targets above/below were mangled to
    # `_UpperCamelCase`; the reads (`parser.add_argument`, `args.*`) show the
    # originals were `parser` and `args`.
    _UpperCamelCase : Union[str, Any] = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 134
| 0
|
"""simple docstring"""
import random
from typing import Any
def fisher_yates_shuffle(data ) -> list[Any]:
    """Shuffle `data` in place by random pairwise swaps and return it.

    Renamed from the mangled `snake_case`: the __main__ block calls
    `fisher_yates_shuffle`, and the body reads `data`, `a`, `b`.
    NOTE: despite the name this is not a textbook Fisher-Yates shuffle —
    it swaps two uniformly random indices len(data) times, which is kept
    here to preserve the original behavior.
    """
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    # Demo: shuffle a list of ints and a list of strings.
    # NOTE(review): the two assignment targets below were mangled to
    # `snake_case`; the print calls read `integers` and `strings`, and the
    # shuffle function referenced here must be named `fisher_yates_shuffle`.
    snake_case = [0, 1, 2, 3, 4, 5, 6, 7]
    snake_case = ['''python''', '''says''', '''hello''', '''!''']
    print('''Fisher-Yates Shuffle:''')
    print('''List''', integers, strings)
    print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 103
|
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order ):
    """Collect (row_id, row_dict) pairs in the order produced by iterating the
    given Spark partitions.

    Renamed from the mangled `_lowerCamelCase`: all test functions below call
    it as `_get_expected_row_ids_and_row_dicts_for_partition_order`.
    """
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
        for row_idx, row in enumerate(partition ):
            # Row ids have the form "<partition>_<row-index-within-partition>".
            expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    """With 8-byte rows and max_shard_size=16, 100 rows split into 50 partitions.

    Renamed from the mangled `_lowerCamelCase` — all six test functions in this
    file shared that name, so pytest could only collect the last one.
    """
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(1_00 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    """Examples are yielded in the requested (here reversed) partition order.

    Renamed from the mangled `_lowerCamelCase`; locals restored from the
    reads in the body (`generate_fn`, `expected_row_ids_and_row_dicts`).
    """
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    """A single-partition iterable yields rows with ids "0_<i>" and {'id': i}.

    Renamed from the mangled `_lowerCamelCase` (shared by all tests here).
    """
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
        assert row_id == f'0_{i}'
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    """Shuffling data sources with a mocked generator reverses partition order.

    Renamed from the mangled `_lowerCamelCase`; the lambda target and the
    argument to shuffle_data_sources were restored as `generator_mock` —
    confirm against the original test.
    """
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('numpy.random.Generator' ) as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    """Sharding across 2 workers splits 4 partitions as [0, 2] and [1, 3].

    Renamed from the mangled `_lowerCamelCase` (shared by all tests here).
    """
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_partitions():
    """Repartitioning never creates more partitions than there are rows.

    Renamed from the mangled `_lowerCamelCase` (shared by all tests here).
    """
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(1_00 ).repartition(1 )
    spark_builder = Spark(df )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 232
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module wiring for the IBert package.  All assignment targets were
# mangled to `_SCREAMING_SNAKE_CASE`, which left `_import_structure` undefined
# at the _LazyModule call and dropped the modeling entries entirely; the
# standard transformers lazy-init pattern is restored below.
_import_structure = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling classes.
    _import_structure["""modeling_ibert"""] = [
        """IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """IBertForMaskedLM""",
        """IBertForMultipleChoice""",
        """IBertForQuestionAnswering""",
        """IBertForSequenceClassification""",
        """IBertForTokenClassification""",
        """IBertModel""",
        """IBertPreTrainedModel""",
    ]
if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys
    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 711
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , """Tatoeba directory does not exist.""" )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Integration tests for the Marian Tatoeba checkpoint converter.

    Restored from a name-mangled version: the skipUnless guard read an
    undefined name (DEFAULT_REPO is imported above precisely for this
    check), the three methods all shared one name (so only the last
    survived), and the cached property must be called `resolver` because
    the tests read `self.resolver`.
    """

    @cached_property
    def resolver(self ):
        # Converted models are written to a throw-away directory.
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )

    @slow
    def test_resolver(self ):
        self.resolver.convert_models(["heb-eng"] )

    @slow
    def test_model_card(self ):
        # dry_run=True returns the card content and metadata instead of writing to disk.
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en" , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 0
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class A_ ( __lowerCamelCase ):
    """Configuration for the Conditional DETR model.

    Stores the architecture hyper-parameters (encoder/decoder sizes), the
    backbone choice, Hungarian-matcher costs, and loss coefficients.

    NOTE(review): restored from a decompiled block whose ``__init__`` had
    duplicate parameter names (a SyntaxError), never wrote to ``self``, used
    ``isinstance(x, x)``, and whose ``to_dict`` returned an undefined name.
    Parameter names/defaults follow the published ConditionalDetrConfig.
    """

    model_type = """conditional_detr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    # Map common config aliases onto this model's native attribute names.
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config, dict):
                # A plain dict was passed: build the matching config class from it.
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        # The model's "hidden layers" count follows the encoder depth.
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias required by `attribute_map`."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias required by `attribute_map`."""
        return self.d_model

    def to_dict(self):
        """Serialize this config to a plain dict, nesting the backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class A_ ( __lowerCamelCase ):
    """ONNX export configuration for Conditional DETR.

    NOTE(review): in the decompiled original all three properties shared one
    name and shadowed each other; restored to the standard OnnxConfig
    property names (`inputs`, `atol_for_validation`, `default_onnx_opset`).
    """

    # Minimum torch version whose ONNX exporter supports this model.
    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self):
        """Dynamic-axis specification for the exported graph inputs."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating exported outputs against PyTorch."""
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        """ONNX opset version used by default for the export."""
        return 12
| 84
|
import argparse
import os
import re
a__ : str = 'src/transformers'
# Pattern that looks at the indentation in a line.
a__ : Union[str, Any] = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
a__ : List[Any] = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
a__ : Dict = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
a__ : Optional[int] = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
a__ : Optional[Any] = re.compile(R'\[([^\]]+)\]')
def get_indent(line: str) -> str:
    """Return the leading-whitespace prefix of `line` ("" for blank lines).

    NOTE(review): renamed from a scrambled identifier back to `get_indent`,
    the name every call site in this module actually uses.
    """
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of lines sharing the given indent level.

    Lines before `start_prompt` (if given) form the first block; scanning stops
    at `end_prompt` (if given), with the remainder appended as a final block.

    NOTE(review): restored from a decompiled version with duplicate parameter
    names (a SyntaxError); control flow mirrors the original line-for-line.
    """
    index = 0
    lines = code.split('\n')
    if start_prompt is not None:
        # Skip ahead to the first line starting with the prompt.
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['\n'.join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # A line back at the target indent level closes the current block —
            # it belongs to the block only when the block ended deeper-indented.
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + ' '):
                current_block.append(lines[index])
                blocks.append('\n'.join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append('\n'.join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append('\n'.join(lines[index:]))
    return blocks
def ignore_underscore(key):
    """Wrap sort-key function `key` so comparisons ignore case and underscores.

    NOTE(review): renamed from a scrambled identifier back to
    `ignore_underscore`, the name `sort_objects` below actually calls.
    """
    def _inner(x):
        return key(x).lower().replace('_', '')
    return _inner
def sort_objects(objects, key=None):
    """Sort `objects` as: constants first, then classes, then functions.

    Each group is sorted case-insensitively ignoring underscores. `key` maps
    an element to the string used for classification/sorting (identity by
    default).

    NOTE(review): the decompiled `noop` returned an undefined name `x`; fixed
    to return its argument.
    """
    def noop(obj):
        return obj

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    sort_key = ignore_underscore(key)
    return sorted(constants, key=sort_key) + sorted(classes, key=sort_key) + sorted(functions, key=sort_key)
def sort_objects_in_import(import_statement: str) -> str:
    """Return `import_statement` with its bracketed object list sorted.

    Handles three layouts: one object per line, all objects on one inner line,
    and a single-line statement.

    NOTE(review): restored from a decompiled version where `_replace` read an
    undefined `match` and the sort lambda read an undefined `x`.
    """
    def _replace(match):
        imports = match.groups()[0]
        # Single entry: nothing to sort.
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace, import_statement)
def sort_imports(file: str, check_only: bool = True):
    """Sort the `_import_structure` entries of one `__init__.py`.

    Returns True when `check_only` is set and the file would change; otherwise
    rewrites the file in place (or returns None when nothing to do).

    NOTE(review): restored from a decompiled version where the sort lambda read
    an undefined `x` and the reassembled block was never written back into
    `main_blocks`.
    """
    with open(file, encoding='utf-8') as f:
        code = f.read()
    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')
    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.')
            with open(file, 'w', encoding='utf-8') as f:
                f.write('\n'.join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every __init__.py under the transformers source tree.

    Raises ValueError when `check_only` is True and at least one file is
    unsorted.

    NOTE(review): the decompiled version called `os.walk` on its own boolean
    parameter; restored to walking the repo source path.
    """
    failures = []
    # Hard-coded repo path (mirrors the module-level path constant).
    for root, _, files in os.walk("src/transformers"):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures = [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(f'Would overwrite {len(failures)} files, run `make style`.')
if __name__ == "__main__":
    # CLI entry point: --check_only reports unsorted files without rewriting.
    # NOTE(review): the decompiled original assigned the parser to a throwaway
    # name and then used an undefined `parser`; restored coherent names.
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 188
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the GroupViT sub-package.
# NOTE(review): restored from a decompiled block — the structure dict and the
# backend-conditional model lists were all assigned to one throwaway name, so
# `_import_structure` was undefined at the `_LazyModule` call and the lazy
# module was never installed into `sys.modules`.
_import_structure = {
    """configuration_groupvit""": [
        """GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """GroupViTConfig""",
        """GroupViTOnnxConfig""",
        """GroupViTTextConfig""",
        """GroupViTVisionConfig""",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch backend available: expose the torch model classes.
    _import_structure["modeling_groupvit"] = [
        """GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GroupViTModel""",
        """GroupViTPreTrainedModel""",
        """GroupViTTextModel""",
        """GroupViTVisionModel""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow backend available: expose the TF model classes.
    _import_structure["modeling_tf_groupvit"] = [
        """TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFGroupViTModel""",
        """TFGroupViTPreTrainedModel""",
        """TFGroupViTTextModel""",
        """TFGroupViTVisionModel""",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 721
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE:
    """Undirected weighted graph with Borůvka's minimum-spanning-tree algorithm.

    NOTE(review): restored from a decompiled block in which every value was
    assigned to one throwaway local (so no instance attribute was ever set)
    and all five methods shared one name, shadowing each other.
    """

    def __init__(self, num_of_nodes) -> None:
        """Create a graph with `num_of_nodes` nodes labeled 0..n-1."""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []      # list of [u, v, weight] triples
        self.m_component = {}  # node -> component representative

    def add_edge(self, u_node, v_node, weight) -> None:
        """Register an undirected edge between `u_node` and `v_node`."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node) -> int:
        """Return the representative of `u_node`'s component (recursive walk)."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node) -> None:
        """Re-point every node at its current component representative."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size, u_node, v_node) -> None:
        """Merge the components of `u_node` and `v_node` (smaller into larger)."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def UpperCamelCase__(self) -> None:
        """Compute and print the minimum spanning tree (Borůvka's algorithm).

        Kept under the original surviving method name for backward
        compatibility; also exposed as `boruvka` below.
        """
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            # Find the cheapest outgoing edge for every component.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            # Add each selected edge and merge the components it connects.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''')
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F'''The total weight of the minimal spanning tree is: {mst_weight}''')

    # Descriptive alias for the MST entry point.
    boruvka = UpperCamelCase__
def UpperCamelCase__( ):
    '''simple docstring'''
# NOTE(review): the function above is an empty placeholder (its body is only a
# docstring, so calling it returns None); original content appears to have been
# lost upstream — confirm before relying on it.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 58
| 0
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# Stable Diffusion inference script optimized with Intel Extension for PyTorch.
# NOTE(review): restored from a decompiled version where the parser/pipe were
# assigned to throwaway names (leaving `parser` undefined) and dtypes used a
# nonexistent `torch.bfloataa` attribute (restored to `torch.bfloat16`).
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"

pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex (trace the UNet with a representative sample when supported)
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    # Older ipex versions reject sample_input; fall back to plain optimization.
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 130
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Force deterministic torch/cuDNN kernels so pipeline output slices are reproducible.
enable_full_determinism()
class A_ (a_ , unittest.TestCase ):
    """Fast CPU tests for `KandinskyVaaImgaImgPipeline` built from tiny dummy modules.

    NOTE(review): restored from a decompiled block in which every class
    attribute/method shared one scrambled name (shadowing each other),
    method signatures had duplicate parameter names (a SyntaxError), and
    several references (`_A`) were undefined. Attribute names follow the
    tester-mixin contract (`pipeline_class`, `params`, ...).
    """

    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ['''image_embeds''', '''negative_image_embeds''', '''image''']
    batch_params = [
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
    ]
    required_optional_params = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    test_xformers_attention_forwardGenerator_pass = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """Tiny UNet configured for image-conditioned Kandinsky decoding."""
        torch.manual_seed(0)
        model_kwargs = {
            '''in_channels''': 4,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        """Constructor kwargs for a tiny MoVQ autoencoder."""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the unet/scheduler/movq triple the pipeline constructor needs."""
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            '''num_train_timesteps''': 1000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic embeddings plus a 256x256 init image for one pipeline call."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((256, 256))
        if str(device).startswith('''mps'''):
            # MPS generators must be created via the global seed helper.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
    """Slow GPU integration test for the Kandinsky 2.2 img2img pipeline.

    NOTE(review): restored undefined references (`_A`) to the proper device
    and flags, and a nonexistent `torch.floataa` dtype to `torch.float16`.
    """

    def tearDown(self):
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_img2img_frog.npy''')
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
        prompt = '''A red cartoon frog, 4k'''
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        # The prior turns the text prompt into image embeddings for the decoder.
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='''''', ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='''np''', )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 130
| 1
|
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _SCREAMING_SNAKE_CASE :
    """Builds tiny LayoutLMv3 configs/inputs and checks each model head's shapes.

    NOTE(review): restored from a decompiled block whose ``__init__`` had
    duplicate parameter names (a SyntaxError), never wrote to ``self``, and
    whose methods all shared one name-mangled identifier. Parameter names and
    defaults follow the standard LayoutLMv3 model tester.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        """Build a tiny config plus random ids/bboxes/pixels/masks/labels."""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1 per box).
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check base-model output shapes for text+image, text-only, image-only."""
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check the sequence-classification head's logits shape."""
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check the token-classification head's logits shape."""
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check the QA head's start/end logits shapes."""
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """pixel_values""": pixel_values,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
    """Common model tests for LayoutLMv3.

    NOTE(review): the scrambled original listed the mixin base ``A__`` twice,
    which raises ``TypeError: duplicate base class`` at class-creation time;
    upstream the bases are ``(ModelTesterMixin, PipelineTesterMixin,
    unittest.TestCase)`` — restore the second mixin's local alias.
    """

    # NOTE(review): the three flags below were all bound to one placeholder
    # name in the scrambled original; these are the conventional mixin flags.
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        # Must carry the unittest name `setUp` so the runner invokes it.
        self.model_tester = LayoutLMvaModelTester(self)
        # NOTE(review): the config class argument was lost in scrambling;
        # LayoutLMvaConfig is the upstream value — confirm the import exists.
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Adapt `inputs_dict` (and labels, if requested) to `model_class`.

        NOTE(review): the MODEL_FOR_*_MAPPING constants below were lost in
        scrambling and are reconstructed from the upstream test — confirm they
        are imported at the top of the file.
        """
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            # Multiple-choice models expect an extra num_choices dimension.
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                # Token classification: one label per text position.
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # Re-run the shape checks for every position-embedding flavour.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration test below.

    Renamed from the scrambled ``_snake_case``: the integration test calls
    ``prepare_img()``, which was otherwise undefined (a NameError).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test running LayoutLMv3-base on a fixture image.

    NOTE(review): the scrambled original referenced undefined names
    (``__A``, ``input_ids``, ``bbox``); they are reconstructed below.
    ``torch_device`` is presumably imported from transformers.testing_utils
    at the top of the file — confirm.
    """

    @cached_property
    def default_image_processor(self):
        # Named to match the `self.default_image_processor` read below.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 256
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _SCREAMING_SNAKE_CASE ( A__ ):
    """Configuration class for WavLM models.

    NOTE(review): the scrambled original declared every ``__init__`` parameter
    as ``__A`` (a SyntaxError) and bound every attribute to one placeholder
    name; the parameter names below are reconstructed from the intact
    right-hand-side names in the body and the default-value sequence, matching
    the upstream `WavLMConfig` — verify against transformers'
    `configuration_wavlm.py`.
    """

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Product of the feature-extractor strides: raw samples per output frame.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 256
| 1
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Project Euler 114: count the ways a row of `length` unit squares can be
    filled with red blocks of length >= 3, any two blocks separated by at
    least one black square (the all-black row counts as one arrangement).

    The scrambled original bound the table to a throwaway name and then read
    the undefined names ``ways_number``/``length``, and the main guard called
    the undefined ``solution`` — all NameErrors; naming is restored here.
    """
    # ways_number[i] = number of arrangements for a row of length i;
    # rows shorter than 3 admit only the empty (all-black) arrangement.
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                # A block of `block_length` preceded by `block_start` black
                # squares plus one separating square; the prefix to its left
                # can be filled in ways_number[remaining] ways.
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # The single block flush against the row end (no separator needed).
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 508
|
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
    """Configuration class for Data2Vec audio models.

    NOTE(review): the scrambled original declared every ``__init__`` parameter
    as ``UpperCAmelCase_`` (a SyntaxError) and bound every attribute to one
    placeholder; names are reconstructed from the intact right-hand-side names
    and the default sequence, matching upstream `Data2VecAudioConfig` — verify
    against transformers' `configuration_data2vec_audio.py`.
    """

    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Product of the feature-extractor strides: raw samples per output frame.
        return math.prod(self.conv_stride)
| 508
| 1
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowerCamelCase(_UpperCamelCase):
    """Resize a PIL image to side multiples of 32 and convert it to a
    [-1, 1] NCHW float tensor.

    The scrambled original bound ``w``/``h`` to a throwaway name and then read
    the undefined ``w``/``h`` (a NameError); the unpacking is restored.
    """
    w, h = _UpperCamelCase.size
    # Round both sides down to integer multiples of 32 (UNet stride requirement).
    w, h = (x - x % 32 for x in (w, h))
    image = _UpperCamelCase.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)  # HWC -> NCHW with a batch dim
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0  # map [0, 1] pixels to [-1, 1]
class lowerCamelCase__ ( A ):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet).

    NOTE(review): the scrambled original declared duplicate parameter names in
    ``__init__``/``__call__`` (a SyntaxError) and clobbered loop variables;
    the names below are reconstructed from the intact keyword usages — verify
    against diffusers' `LDMSuperResolutionPipeline`.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Upscale `image` by iteratively denoising latents conditioned on it."""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 299
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : List[str] = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class lowerCamelCase__ ( A ):
    """Configuration class for BLOOM models.

    NOTE(review): the scrambled original declared every ``__init__`` parameter
    as ``UpperCamelCase`` (a SyntaxError) and bound all three class attributes
    to ``__a``; names are reconstructed from the intact right-hand-side names
    in the body, matching upstream `BloomConfig`.
    """

    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class lowerCamelCase__ ( A ):
    """ONNX export configuration for BLOOM.

    NOTE(review): the scrambled original gave all five members one placeholder
    name (they overwrote each other) while the body reads
    ``self.num_layers``/``self.num_attention_heads`` — those reads ground the
    restored member names, which match the `OnnxConfigWithPast` API.
    """

    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        # Looser tolerance for ONNX-vs-PyTorch output comparison.
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 299
| 1
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the SentencePiece test fixture (upstream name: SAMPLE_VOCAB).
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

# Special-token ids of the "base" PLBart vocabulary. The integration test
# below reads the names EN_CODE and PYTHON_CODE, so they must be bound — the
# scrambled original assigned both literals to the same throwaway name.
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Tokenizer tests for PLBart ("base" and "multi" language-code variants).

    NOTE(review): every occurrence of ``_lowerCAmelCase`` below is an
    undefined name left by scrambling (each stood for a different lost value:
    the sample-vocab path, boolean flags, token lists) — each use raises
    NameError at runtime; reconstruct them from the upstream
    test_tokenization_plbart.py before relying on these tests.
    '''
    # NOTE(review): these three class attributes were originally
    # tokenizer_class / rust_tokenizer_class / test_rust_tokenizer.
    lowercase_ = PLBartTokenizer
    lowercase_ = None
    lowercase_ = False
    def lowerCAmelCase_ ( self : List[str] ):
        # Originally unittest's setUp: builds a tokenizer from the
        # SentencePiece fixture and saves it to a temp dir.
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): the result was originally bound to `tokenizer`,
        # which the next line reads — currently a NameError.
        SCREAMING_SNAKE_CASE_ = PLBartTokenizer(_lowerCAmelCase , language_codes='base' , keep_accents=_lowerCAmelCase )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowerCAmelCase_ ( self : int ):
        # Full tokenize/encode/decode round-trip for the "base" language codes.
        # NOTE(review): intermediate results were originally bound to
        # `tokenizer`, `tokens`, `ids`, etc. — the placeholder assignments
        # below break those reads.
        SCREAMING_SNAKE_CASE_ = PLBartTokenizer(_lowerCAmelCase , language_codes='base' , keep_accents=_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('This is a test' )
        self.assertListEqual(_lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            _lowerCAmelCase , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ] , )
        SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
        self.assertListEqual(
            _lowerCAmelCase , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
        self.assertListEqual(
            _lowerCAmelCase , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ] , )
        # The last four vocab entries are the "base" language-code tokens.
        SCREAMING_SNAKE_CASE_ = tokenizer.vocab_size
        SCREAMING_SNAKE_CASE_ = [tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) for x in range(end - 4 , _lowerCAmelCase )]
        self.assertListEqual(_lowerCAmelCase , ['__java__', '__python__', '__en_XX__', '<mask>'] )
        SCREAMING_SNAKE_CASE_ = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
        SCREAMING_SNAKE_CASE_ = tokenizer(_lowerCAmelCase ).input_ids
        self.assertEqual(
            tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) , _lowerCAmelCase , )
    def lowerCAmelCase_ ( self : int ):
        # Same round-trip as above, but with the "multi" language-code set
        # (seven language tokens instead of four).
        SCREAMING_SNAKE_CASE_ = PLBartTokenizer(_lowerCAmelCase , language_codes='multi' , keep_accents=_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('This is a test' )
        self.assertListEqual(_lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            _lowerCAmelCase , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ] , )
        SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
        self.assertListEqual(
            _lowerCAmelCase , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
        self.assertListEqual(
            _lowerCAmelCase , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ] , )
        SCREAMING_SNAKE_CASE_ = tokenizer.vocab_size
        SCREAMING_SNAKE_CASE_ = [tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) for x in range(end - 7 , _lowerCAmelCase )]
        self.assertListEqual(
            _lowerCAmelCase , ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'] )
        SCREAMING_SNAKE_CASE_ = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
        SCREAMING_SNAKE_CASE_ = tokenizer(_lowerCAmelCase ).input_ids
        self.assertEqual(
            tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) , _lowerCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( unittest.TestCase ):
    """Integration tests for the pretrained PLBart tokenizer checkpoint
    ``uclanlp/plbart-python-en_XX`` (Python source -> English target pairs).

    NOTE(review): this block looks machine-obfuscated. All four class-level
    constants are bound to the same name ``lowercase_`` (only the last binding
    survives), every method is named ``lowerCAmelCase_`` (later defs shadow
    earlier ones), and several lines read ``_lowerCAmelCase`` /
    ``self.tokenizer`` / ``src_text`` / ``generated_ids`` / ``ids`` /
    ``targets`` / ``batch`` / ``new_tok`` which are never bound here.
    Presumably the original names were ``checkpoint_name`` / ``src_text`` /
    ``tgt_text`` / ``expected_src_tokens`` etc. — confirm against the upstream
    PLBart tokenizer test suite before running.
    """

    # Checkpoint under test.
    lowercase_ = "uclanlp/plbart-python-en_XX"
    # Source (Python code) samples.
    lowercase_ = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    # Target (English) samples.
    lowercase_ = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    # Expected ids for the first source sample; ends with EOS (2) + PYTHON_CODE.
    lowercase_ = [
        134,
        5_452,
        33_460,
        33_441,
        33_463,
        33_465,
        33_463,
        33_449,
        988,
        20,
        33_456,
        19,
        33_456,
        771,
        39,
        4_258,
        889,
        3_318,
        33_441,
        33_463,
        33_465,
        33_463,
        33_449,
        2_471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def lowerCAmelCase_ ( cls : Optional[int] ):
        # setUpClass-style hook: load the tokenizer once for the whole class.
        SCREAMING_SNAKE_CASE_ = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes='base' , src_lang='python' , tgt_lang='en_XX' )
        SCREAMING_SNAKE_CASE_ = 1
        return cls

    def lowerCAmelCase_ ( self : Union[str, Any] ):
        # Language-code tokens map to fixed ids just above the base vocab.
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 50_001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 50_002 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 50_003 )

    def lowerCAmelCase_ ( self : Any ):
        # Encoding the source samples reproduces the expected id sequence.
        SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase )

    def lowerCAmelCase_ ( self : Any ):
        # Decoding with skip_special_tokens drops the leading language code
        # and never emits the EOS token in the text.
        self.assertIn(_lowerCAmelCase , self.tokenizer.all_special_ids )
        SCREAMING_SNAKE_CASE_ = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
        SCREAMING_SNAKE_CASE_ = self.tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowerCAmelCase )
        self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
        self.assertNotIn(self.tokenizer.eos_token , _lowerCAmelCase )

    def lowerCAmelCase_ ( self : Dict ):
        # Truncation keeps EOS (+ language code) as the final two ids.
        SCREAMING_SNAKE_CASE_ = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20]
        self.assertIsInstance(src_text[0] , _lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = 10
        SCREAMING_SNAKE_CASE_ = self.tokenizer(_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , _lowerCAmelCase )
        self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )

    def lowerCAmelCase_ ( self : Union[str, Any] ):
        # <mask> and __java__ resolve to reserved ids above the base vocab.
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__'] ) , [50_004, 50_001] )

    def lowerCAmelCase_ ( self : Union[str, Any] ):
        # save_pretrained / from_pretrained round-trip preserves the id map.
        SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE_ = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = PLBartTokenizer.from_pretrained(_lowerCAmelCase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowerCAmelCase )

    @require_torch
    def lowerCAmelCase_ ( self : Dict ):
        # Batch layout: source ends with EOS+PYTHON_CODE, labels with EOS+EN_CODE.
        SCREAMING_SNAKE_CASE_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowerCAmelCase , return_tensors='pt' )
        SCREAMING_SNAKE_CASE_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , _lowerCAmelCase )
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )

    @require_torch
    def lowerCAmelCase_ ( self : Any ):
        # Full source/target encoding, truncated to the expected source length.
        SCREAMING_SNAKE_CASE_ = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
        SCREAMING_SNAKE_CASE_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
        self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
        self.assertEqual((2, 26) , batch.input_ids.shape )
        self.assertEqual((2, 26) , batch.attention_mask.shape )
        SCREAMING_SNAKE_CASE_ = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )

    def lowerCAmelCase_ ( self : Any ):
        # Source and target may be padded/truncated to different max lengths.
        SCREAMING_SNAKE_CASE_ = self.tokenizer(self.src_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=3 , return_tensors='pt' )
        SCREAMING_SNAKE_CASE_ = self.tokenizer(
            text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=10 , return_tensors='pt' )
        SCREAMING_SNAKE_CASE_ = targets['input_ids']
        SCREAMING_SNAKE_CASE_ = shift_tokens_right(_lowerCAmelCase , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )

    @require_torch
    def lowerCAmelCase_ ( self : List[str] ):
        # Translation inputs carry the forced BOS id of the target language.
        SCREAMING_SNAKE_CASE_ = self.tokenizer._build_translation_inputs(
            'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java' )
        self.assertEqual(
            nested_simplify(_lowerCAmelCase ) , {
                # A, test, EOS, en_XX
                'input_ids': [[150, 242, 2, 50_003]],
                'attention_mask': [[1, 1, 1, 1]],
                # java
                'forced_bos_token_id': 50_001,
            } , )
# ---- file boundary (concatenation artifact removed) ----
import torch
from diffusers import DiffusionPipeline
class _A(DiffusionPipeline):
    """Minimal custom pipeline used for testing: runs a single denoising step
    and returns an all-ones tensor of the sample's shape.

    Fixes (obfuscation artifacts in the original):
    - base class was the undefined name ``__UpperCamelCase``; the file imports
      ``DiffusionPipeline``, whose ``register_modules`` this class calls;
    - ``__init__`` declared the same parameter name twice (SyntaxError);
    - ``__call__`` read the undefined name ``SCREAMING_SNAKE_CASE_``.
    """

    def __init__(self, unet, scheduler):
        """Register the UNet and scheduler as pipeline modules.

        Args:
            unet: denoising model; must expose ``config.in_channels`` and
                ``config.sample_size``.
            scheduler: noise scheduler with a ``step`` method.
        """
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        """Run one UNet + scheduler step and return a tensor of ones.

        Returns:
            torch.Tensor of shape (1, in_channels, sample_size, sample_size)
            filled with ones (``x - x + ones_like(x)`` keeps the graph wired
            through the scheduler output on purpose).
        """
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
# ---- file boundary (concatenation artifact removed) ----
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def __lowercase():
    """Build and parse the CLI options of the Stable Diffusion generation script.

    Fix (obfuscation artifact): ``type=snake_case`` / ``default=snake_case`` /
    ``required=snake_case`` referenced an undefined module-level name; restored
    to ``str`` / ``None`` / ``True`` and ``int`` for the numeric options, as
    the help strings indicate.

    Returns:
        argparse.Namespace with pretrained_model_name_or_path, caption,
        images_num, seed and cuda_id.

    NOTE(review): the module-level code calls this function as ``parse_args``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''',
        '''--pretrained_model_name_or_path''',
        type=str,
        default=None,
        required=True,
        help='''Path to pretrained model or model identifier from huggingface.co/models.''',
    )
    parser.add_argument(
        '''-c''',
        '''--caption''',
        type=str,
        default='''robotic cat with wings''',
        help='''Text used to generate images.''',
    )
    parser.add_argument(
        '''-n''',
        '''--images_num''',
        type=int,
        default=4,
        help='''How much images to generate.''',
    )
    parser.add_argument(
        '''-s''',
        '''--seed''',
        type=int,
        default=42,
        help='''Seed for random process.''',
    )
    parser.add_argument(
        '''-ci''',
        '''--cuda_id''',
        type=int,
        default=0,
        help='''cuda_id.''',
    )
    args = parser.parse_args()
    return args
def __lowercase(imgs, rows, cols):
    """Paste ``rows * cols`` PIL images into a single grid image.

    Fixes (obfuscation artifacts): the original signature repeated the
    parameter name ``snake_case`` (a SyntaxError); the ``w, h`` tuple unpack of
    ``imgs[0].size`` was collapsed into a single dead binding, leaving ``w``
    and ``h`` undefined; an unused ``grid.size`` binding was removed.
    Parameter names ``rows``/``cols`` are fixed by the keyword call site below.

    Args:
        imgs: sequence of equally-sized PIL images.
        rows / cols: grid dimensions; ``len(imgs)`` must equal ``rows * cols``.

    Returns:
        A new RGB ``PIL.Image`` of size (cols * w, rows * h).

    Raises:
        ValueError: if ``len(imgs) != rows * cols``.

    NOTE(review): the caller refers to this function as ``image_grid``.
    """
    if not len(imgs) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''')
    w, h = imgs[0].size
    grid = Image.new('''RGB''', size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        # Fill the grid row-major: column = i % cols, row = i // cols.
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def __lowercase(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    """Generate images with a Stable Diffusion pipeline and arrange them in a grid.

    Fix (obfuscation artifact): the original signature repeated the parameter
    name ``snake_case`` (a SyntaxError). Restored names are fixed by the
    keyword call site (``prompt=``, ``num_images_per_prompt=``, ``seed=``) and
    by the positional defaults (7.5 guidance, 50 steps).

    Returns:
        (grid, images): the composed grid image and the list of raw images.

    NOTE(review): the caller refers to this function as ``generate_images``,
    and ``image_grid`` is the grid helper defined above.
    """
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    # Arrange the images in an (approximately square) grid.
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
# NOTE(review): obfuscated script tail — every result is bound to the same
# annotated name ``SCREAMING_SNAKE_CASE__`` while later lines read the
# original identifiers (``args``, ``tokenizer``, ``text_encoder``, ``vae``,
# ``unet``, ``pipeline``, ``grid``, ``images``, ``dirname``), which are never
# bound here; restore the original bindings before running. Indentation of
# the if/else bodies was reconstructed — confirm against the upstream script.
SCREAMING_SNAKE_CASE__ : Any = parse_args()
# Load models and create wrapper for stable diffusion
SCREAMING_SNAKE_CASE__ : int = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""")
SCREAMING_SNAKE_CASE__ : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""")
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""")
SCREAMING_SNAKE_CASE__ : int = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""")
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Replace the safety checker with a pass-through (always reports "not NSFW").
SCREAMING_SNAKE_CASE__ : str = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")):
    # A quantized UNet (Intel Neural Compressor checkpoint) is present: load it.
    SCREAMING_SNAKE_CASE__ : str = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, """unet""", unet)
else:
    # No quantized checkpoint: run the fp32 UNet on the selected CUDA device.
    SCREAMING_SNAKE_CASE__ : Dict = unet.to(torch.device("""cuda""", args.cuda_id))
SCREAMING_SNAKE_CASE__ : Any = pipeline.to(unet.device)
SCREAMING_SNAKE_CASE__ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the grid next to the model, named after the caption.
grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split()))))
SCREAMING_SNAKE_CASE__ : int = os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
# ---- file boundary (concatenation artifact removed) ----
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
# Logging and weight-name mapping tables for the fairseq -> HF conversion below.
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)

# Maps fairseq wav2vec2 parameter-name fragments to their HF counterparts;
# '*' is substituted with the encoder layer index at load time.
# NOTE(review): obfuscation artifact — the dict and the list below are bound
# to the same name ``SCREAMING_SNAKE_CASE__``, yet the loader functions read
# them as ``MAPPING`` (and presumably ``TOP_LEVEL_KEYS``); restore the
# original names before running.
SCREAMING_SNAKE_CASE__ : Optional[int] = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
}
# HF parameter prefixes that live at the model's top level (no encoder prefix).
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
]
def __lowercase(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the HF parameter addressed by dotted path ``key``.

    Fixes (obfuscation artifacts): the original signature repeated the name
    ``snake_case`` (a SyntaxError); the ``getattr`` walk and the ``.data``
    assignment targets were collapsed into dead bindings. ``hf_pointer`` is
    grounded by the surviving ``hf_pointer.shape`` reference in the body.

    Args:
        hf_pointer: HF module to walk into.
        key: dotted attribute path (e.g. ``encoder.layers.3.attention.k_proj``).
        value: source tensor.
        full_name: original fairseq name, used for logging/asserts.
        weight_type: one of "weight", "weight_g", "weight_v", "bias" or None.

    NOTE(review): the loader above calls this function as ``set_recursively``.
    """
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer, attribute)
    # Shape check against the destination parameter before assigning.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def __lowercase(fairseq_model, hf_model):
    """Copy all wav2vec2 encoder weights from a fairseq model into ``hf_model``.

    Fixes (obfuscation artifacts): duplicate ``snake_case`` parameters
    (SyntaxError) and dead bindings for ``unused_weights`` / ``is_used`` /
    the helper-call arguments; names restored from the surviving references
    (``is_used`` at the loop bottom, ``hf_model.config`` in the conv branch).

    Routing per fairseq tensor name:
    - "conv_layers"        -> feature extractor (``load_conv_layer``)
    - adaptor / proj names -> adapter           (``load_adapter``)
    - otherwise            -> MAPPING lookup    (``set_recursively``)
    Unmatched names are collected and reported via ``logger.warning``.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''',
            )
            is_used = True
        elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.''']):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Replace the wildcard with the encoder layer index.
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'''Unused weights: {unused_weights}''')
def __lowercase(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into ``feature_extractor``.

    Fixes (obfuscation artifacts): duplicate ``snake_case`` parameters
    (SyntaxError) and dead ``.data`` assignment bindings; names restored from
    the surviving references (``feature_extractor.conv_layers[...]`` inside
    the asserts). ``type_id`` 0 = conv weight/bias, ``type_id`` 2 = the
    layer/group norm of the layer.

    NOTE(review): the loader above calls this function as ``load_conv_layer``.
    """
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
def __lowercase(full_name, value, adapter, unused_weights):
    """Copy one fairseq adapter / projection tensor into the HF ``adapter``.

    Fixes (obfuscation artifacts): duplicate ``snake_case`` parameters
    (SyntaxError), dead ``.data`` assignment bindings, and the broken
    ``isinstance(snake_case, snake_case)`` guard — restored to
    ``isinstance(layer_id, int)``, grounded by the ``layer_id = int(...)`` /
    ``None`` branch above and the ``adapter.layers[layer_id]`` indexing below.

    Routing: ``proj_ln``-named tensors go to ``adapter.proj_layer_norm``;
    other non-"adaptor" tensors to ``adapter.proj``; numbered "adaptor"
    tensors to ``adapter.layers[layer_id].conv``.

    NOTE(review): the loader above calls this function as ``load_adapter``.
    """
    name = full_name.split('''adaptor.''')[-1]
    items = name.split('''.''')
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                adapter.proj_layer_norm.bias.data = value
                logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                adapter.proj.bias.data = value
                logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                adapter.proj.weight.data = value
                logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''')
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''')
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            adapter.layers[layer_id].conv.weight.data = value
            # (message text kept as in the original, which also says "bias")
            logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
def __lowercase(emb):
    """Build an ``nn.Linear`` LM head that shares weights with an embedding.

    Fixes (obfuscation artifacts): the parameter was named ``snake_case`` while
    the body reads ``emb`` (NameError); the ``vocab_size, emb_size`` tuple
    unpack was collapsed; ``bias=snake_case`` referenced an undefined name —
    restored to ``bias=False`` (presumably, since the bias is never copied;
    confirm against the upstream conversion script).

    Args:
        emb: an embedding module with a ``weight`` tensor of shape
            (vocab_size, emb_size).

    Returns:
        nn.Linear(vocab_size, emb_size) whose weight data aliases ``emb``'s.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def __lowercase(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Convert a fairseq wav2vec2+mBART speech-seq2seq checkpoint to HF format.

    Fixes (obfuscation artifacts): the original signature repeated the name
    ``snake_case`` eleven times (a SyntaxError) and every intermediate result
    was bound to a dead name. Parameter names are fixed by the keyword call
    in the ``__main__`` block below; intermediate names restored from the
    surviving references (``model[0].eval()``, ``hf_decoder.model.decoder``,
    ``generated f-strings``). Assumed mappings (confirm against the upstream
    conversion script): ``use_auth_token=True``, ``strict=False`` for the
    decoder state dict, ``tie_word_embeddings = False``.

    NOTE(review): the ``__main__`` block calls this function as
    ``convert_wavaveca_checkpoint``.
    """
    # Encoder (wav2vec2) config, extended with the adapter settings.
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            '''config_yaml''': config_yaml_path,
            '''data''': '''/'''.join(dict_path.split('''/''')[:-1]),
            '''w2v_path''': checkpoint_path,
            '''load_pretrained_decoder_from''': None,
        },
    )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''')
    logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''')
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    # Propagate tokenizer-dependent special ids into the saved config.
    config_dict = hf_wavavec.config.to_dict()
    config_dict['''pad_token_id'''] = tokenizer.pad_token_id
    config_dict['''bos_token_id'''] = tokenizer.bos_token_id
    config_dict['''eos_token_id'''] = tokenizer.eos_token_id
    config_dict['''tokenizer_class'''] = '''mbart50'''
    config_dict['''feature_extractor_type'''] = '''wav2vec2'''
    config_dict['''decoder_start_token_id'''] = tokenizer.eos_token_id
    config_dict['''forced_bos_token_id'''] = 250004
    config_dict['''forced_eos_token_id'''] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config_dict)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): obfuscated — the parser is bound to ``SCREAMING_SNAKE_CASE__``
    # but configured/read through ``parser`` / ``args``, and the converter is
    # defined above as ``__lowercase`` while being called here as
    # ``convert_wavaveca_checkpoint``; restore the original bindings to run.
    SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
    parser.add_argument(
        """--encoder_config_path""",
        default="""facebook/wav2vec2-xls-r-1b""",
        type=str,
        help="""Path to hf encoder wav2vec2 checkpoint config""",
    )
    parser.add_argument(
        """--decoder_config_path""",
        default="""facebook/mbart-large-50-one-to-many-mmt""",
        type=str,
        help="""Path to hf decoder checkpoint config""",
    )
    parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
    parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
    parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
    parser.add_argument("""--encoder_output_dim""", default=10_24, type=int, help="""encoder output dim""")
    parser.add_argument("""--start_token_id""", default=25_00_04, type=int, help="""`decoder_start_token_id` of model config""")
    SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        decoder_start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
# ---- file boundary (concatenation artifact removed) ----
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    SCREAMING_SNAKE_CASE__ : Dict = """platform"""
    # Fix: the bare assignment above had no effect on JAX — the allocator must
    # be selected via the environment variable *before* jax is imported.
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = SCREAMING_SNAKE_CASE__

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )
def _lowerCamelCase(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> dict:
    """Fill in default masks for a Blenderbot test batch.

    Fix (obfuscation artifact): the original signature repeated the parameter
    name ``__lowerCamelCase`` (a SyntaxError); names restored from the body's
    references (``input_ids``, ``decoder_input_ids``, ``attention_mask``, ...).

    Attention masks default to 1 where the id is not the pad token; head masks
    default to all-ones with (layers, heads) shape.

    Returns:
        dict with input_ids, decoder_input_ids, attention_mask and
        decoder_attention_mask. Note the returned "decoder_attention_mask"
        is the *encoder* attention_mask, exactly as in the original code —
        preserved here; confirm against the upstream test before changing.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class UpperCAmelCase_ :
    """Config/inputs factory and cache-consistency checks for Flax Blenderbot tests.

    NOTE(review): obfuscated — ``__init__`` repeats the parameter name
    ``_lowerCAmelCase`` seventeen times (a SyntaxError as written), and the
    methods read ``_lowerCAmelCase`` / ``input_ids`` / ``config`` /
    ``max_decoder_length`` / ``model`` / ``outputs`` values that are bound to
    other names; the intended parameters are presumably (parent, batch_size,
    seq_length, is_training, use_labels, vocab_size, hidden_size,
    num_hidden_layers, num_attention_heads, intermediate_size, hidden_act,
    hidden_dropout_prob, attention_probs_dropout_prob,
    max_position_embeddings, eos_token_id, pad_token_id, bos_token_id,
    initializer_range) — confirm against the upstream Flax Blenderbot tester.
    """

    def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=99 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=32 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0.0_2 , ):
        # Store the tiny-model hyperparameters used to build BlenderbotConfig.
        UpperCAmelCase__ : str = parent
        UpperCAmelCase__ : Optional[Any] = batch_size
        UpperCAmelCase__ : str = seq_length
        UpperCAmelCase__ : List[Any] = is_training
        UpperCAmelCase__ : List[str] = use_labels
        UpperCAmelCase__ : Any = vocab_size
        UpperCAmelCase__ : Any = hidden_size
        UpperCAmelCase__ : Optional[Any] = num_hidden_layers
        UpperCAmelCase__ : List[Any] = num_attention_heads
        UpperCAmelCase__ : Tuple = intermediate_size
        UpperCAmelCase__ : int = hidden_act
        UpperCAmelCase__ : int = hidden_dropout_prob
        UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
        UpperCAmelCase__ : List[Any] = max_position_embeddings
        UpperCAmelCase__ : Optional[int] = eos_token_id
        UpperCAmelCase__ : Optional[int] = pad_token_id
        UpperCAmelCase__ : Union[str, Any] = bos_token_id
        UpperCAmelCase__ : List[Any] = initializer_range

    def __UpperCAmelCase ( self ):
        # Build random input ids (EOS-terminated), a shifted decoder input,
        # a tiny BlenderbotConfig, and the default-mask inputs dict.
        UpperCAmelCase__ : Union[str, Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        UpperCAmelCase__ : Tuple = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
        UpperCAmelCase__ : Optional[int] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
        UpperCAmelCase__ : Dict = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , )
        UpperCAmelCase__ : Dict = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
        return config, inputs_dict

    def __UpperCAmelCase ( self ):
        # Thin alias used by the common test mixin.
        UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.prepare_config_and_inputs()
        return config, inputs_dict

    def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
        # Verify incremental decoding with a KV cache matches one-shot decoding
        # (no explicit decoder attention mask variant).
        UpperCAmelCase__ : List[str] = 20
        UpperCAmelCase__ : Tuple = model_class_name(_lowerCAmelCase )
        UpperCAmelCase__ : int = model.encode(inputs_dict["""input_ids"""] )
        UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        UpperCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
        UpperCAmelCase__ : Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
        UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        # Decode all but the last token with the cache...
        UpperCAmelCase__ : Tuple = model.decode(
            decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
        UpperCAmelCase__ : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
        # ...then the final token from the cached state.
        UpperCAmelCase__ : List[Any] = model.decode(
            decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , )
        UpperCAmelCase__ : str = model.decode(_lowerCAmelCase , _lowerCAmelCase )
        UpperCAmelCase__ : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" )

    def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
        # Same cache-consistency check, but with an explicit (padded)
        # decoder attention mask.
        UpperCAmelCase__ : Tuple = 20
        UpperCAmelCase__ : Union[str, Any] = model_class_name(_lowerCAmelCase )
        UpperCAmelCase__ : List[Any] = model.encode(inputs_dict["""input_ids"""] )
        UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        # Pad the decoder mask out to max_decoder_length with zeros.
        UpperCAmelCase__ : str = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        UpperCAmelCase__ : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
        UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        UpperCAmelCase__ : str = model.decode(
            decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
        UpperCAmelCase__ : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
        UpperCAmelCase__ : List[str] = model.decode(
            decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
        UpperCAmelCase__ : int = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase )
        UpperCAmelCase__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
    """Standalone Flax Blenderbot head tests on a tiny 99-token vocabulary.

    NOTE(review): obfuscated — the class constant is bound to
    ``__lowerCamelCase`` while methods read ``self.vocab_size``, and method
    bodies read ``input_ids`` / ``config`` / ``outputs`` / ``summary`` /
    ``shifted`` / ``n_pad_before`` values bound to other names; also
    ``np.intaa`` / ``np.floataa`` are obfuscated dtype names (presumably
    ``np.int64`` / ``np.float32``) — confirm against the upstream test file.
    """

    # Tiny vocabulary size shared by all tests (read as ``self.vocab_size``).
    __lowerCamelCase = 99

    def __UpperCAmelCase ( self ):
        # Build a fixed batch of EOS-terminated sequences and a tiny config.
        UpperCAmelCase__ : Any = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.intaa , )
        UpperCAmelCase__ : int = input_ids.shape[0]
        UpperCAmelCase__ : List[str] = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size

    def __UpperCAmelCase ( self ):
        # LM head output has shape (batch, seq_len, vocab_size).
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self._get_config_and_data()
        UpperCAmelCase__ : Optional[Any] = FlaxBlenderbotForConditionalGeneration(_lowerCAmelCase )
        UpperCAmelCase__ : Optional[int] = lm_model(input_ids=_lowerCAmelCase )
        UpperCAmelCase__ : Tuple = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["""logits"""].shape , _lowerCAmelCase )

    def __UpperCAmelCase ( self ):
        # With explicit decoder inputs, logits follow the decoder's shape.
        UpperCAmelCase__ : List[Any] = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        UpperCAmelCase__ : Tuple = FlaxBlenderbotForConditionalGeneration(_lowerCAmelCase )
        UpperCAmelCase__ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
        UpperCAmelCase__ : Union[str, Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
        UpperCAmelCase__ : int = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
        UpperCAmelCase__ : Union[str, Any] = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["""logits"""].shape , _lowerCAmelCase )

    def __UpperCAmelCase ( self ):
        # shift_tokens_right keeps the shape, reduces pad count by one and
        # forces the decoder start token (2) into column 0.
        UpperCAmelCase__ : List[str] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
        UpperCAmelCase__ : Optional[int] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
        UpperCAmelCase__ : List[str] = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
        UpperCAmelCase__ : List[Any] = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(_lowerCAmelCase , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class UpperCAmelCase_ ( __lowerCamelCase , unittest.TestCase , __lowerCamelCase ):
    """Model-level tests for the Flax Blenderbot implementation.

    NOTE(review): base classes and many identifiers in this class were mangled
    by an automated rewrite (`__lowerCamelCase` bases, duplicated
    `__UpperCAmelCase` method names, `_lowerCAmelCase` call arguments).
    The three class attributes below are presumably `is_encoder_decoder`,
    `all_model_classes` and `all_generative_model_classes` -- confirm against
    the upstream test module before relying on this file.
    """

    __lowerCamelCase = True
    __lowerCamelCase = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    __lowerCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def __UpperCAmelCase ( self ):
        # Build the shared model tester used by the checks below.
        UpperCAmelCase__ : str = FlaxBlenderbotModelTester(self )

    def __UpperCAmelCase ( self ):
        # Exercise the use_cache fast-decoding path for every model class.
        UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )

    def __UpperCAmelCase ( self ):
        # Same as above, but with an explicit attention mask supplied.
        UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )

    def __UpperCAmelCase ( self ):
        # Encoding must produce identical output shapes with and without
        # jax.jit compilation.
        UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase__ : Dict = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
                UpperCAmelCase__ : int = model_class(_lowerCAmelCase )

                @jax.jit
                def encode_jitted(_lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ):
                    return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )

                with self.subTest("""JIT Enabled""" ):
                    UpperCAmelCase__ : int = encode_jitted(**_lowerCAmelCase ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        UpperCAmelCase__ : Optional[int] = encode_jitted(**_lowerCAmelCase ).to_tuple()
                self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
                for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def __UpperCAmelCase ( self ):
        # Decoding (given precomputed encoder outputs) must also be
        # shape-stable under jax.jit.
        UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase__ : Tuple = model_class(_lowerCAmelCase )
                UpperCAmelCase__ : Tuple = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
                UpperCAmelCase__ : int = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
                    return model.decode(
                        decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , )

                with self.subTest("""JIT Enabled""" ):
                    UpperCAmelCase__ : str = decode_jitted(**_lowerCAmelCase ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        UpperCAmelCase__ : List[str] = decode_jitted(**_lowerCAmelCase ).to_tuple()
                self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
                for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def __UpperCAmelCase ( self ):
        # Smoke test: the published 400M checkpoint loads and runs a forward
        # pass on a minimal eos-only input.
        for model_class_name in self.all_model_classes:
            UpperCAmelCase__ : Any = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""" )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            UpperCAmelCase__ : List[Any] = np.ones((1, 1) ) * model.config.eos_token_id
            UpperCAmelCase__ : int = model(_lowerCAmelCase )
            self.assertIsNotNone(_lowerCAmelCase )

    @unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" )
    @slow
    def __UpperCAmelCase ( self ):
        # End-to-end generation with the 3B checkpoint (converted from the
        # PyTorch weights); compares decoded text against a fixed reference.
        UpperCAmelCase__ : Union[str, Any] = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25}
        UpperCAmelCase__ : str = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
        UpperCAmelCase__ : List[Any] = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=_lowerCAmelCase )
        UpperCAmelCase__ : Tuple = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" )
        UpperCAmelCase__ : Tuple = ["""Sam"""]
        UpperCAmelCase__ : Optional[Any] = tokenizer(_lowerCAmelCase , return_tensors="""jax""" )
        UpperCAmelCase__ : Optional[int] = model.generate(**_lowerCAmelCase , **_lowerCAmelCase )
        UpperCAmelCase__ : Optional[int] = """Sam is a great name. It means \"sun\" in Gaelic."""
        UpperCAmelCase__ : Tuple = tokenizer.batch_decode(_lowerCAmelCase , **_lowerCAmelCase )
        assert generated_txt[0].strip() == tgt_text
| 79
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( DiffusionPipeline ):
    """Unconditional image generation with the variance-exploding (VE)
    score-based SDE sampler (predictor-corrector loop).

    Attributes:
        unet: U-Net that predicts the score of the noisy sample.
        scheduler: ScoreSdeVeScheduler driving the prediction/correction steps.
    """

    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2_000,
        generator=None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Run the predictor-corrector sampling loop.

        Args:
            batch_size: number of images to generate.
            num_inference_steps: number of SDE discretization steps.
            generator: optional torch generator for reproducible noise.
            output_type: "pil" to return PIL images, anything else for numpy.
            return_dict: when False, return a plain tuple instead of
                ImagePipelineOutput.

        Returns:
            ImagePipelineOutput (or a one-element tuple) with the images.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # Start from pure noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step (Langevin-dynamics refinement at fixed noise level)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step (one reverse-time SDE step)
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean of the final step; map to [0, 1] HWC numpy.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
| 462
| 0
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 486
|
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Create a randomly initialized seq2seq model from *config_name*'s config
    (with optional overrides) and save the model plus tokenizer to *save_dir*.

    Returns:
        The randomly initialized model instance.
    """
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeqaSeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


# Backward-compatible alias for the previous (auto-generated) name.
A_ = save_randomly_initialized_version


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
| 486
| 1
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), """Tatoeba directory does not exist.""")
class _a(unittest.TestCase):
    """Slow integration tests for the Tatoeba -> Marian checkpoint converter."""

    @cached_property
    def resolver(self):
        # One converter per test class, writing into a fresh temp directory.
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        # Converting a single language pair must complete without error.
        self.resolver.convert_models(['heb-eng'])

    @slow
    def test_model_card(self):
        # A dry-run model card must carry the metadata of the source pair.
        content, mmeta = self.resolver.write_model_card('opus-mt-he-en', dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 23
|
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """Post *message_body* to a Slack incoming-webhook at *slack_url*.

    Raises:
        ValueError: if Slack answers with a non-200 status code.
    """
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url, json={'text': message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            'Request to slack returned an error '
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(msg)


# Backward-compatible alias for the previous (auto-generated) name.
_snake_case = send_slack_message


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 23
| 1
|
def A (__A : str ) -> list:
    """Return every variant of *__A* with exactly one alphabetic character
    uppercased, in left-to-right order of the letter changed.

    Non-alphabetic positions are skipped, so the result has one entry per
    letter in the input.

    >>> A("abc")
    ['Abc', 'aBc', 'abC']
    """
    variants = []
    for index, char in enumerate(__A):
        if not char.isalpha():
            continue
        variants.append(__A[:index] + char.upper() + __A[index + 1 :])
    return variants


if __name__ == "__main__":
    __import__("doctest").testmod()
| 719
|
from __future__ import annotations

from fractions import Fraction
from math import gcd, isqrt, sqrt
def is_sq(number: int) -> bool:
    """Return True if *number* is a perfect square.

    Uses math.isqrt so the test stays exact for arbitrarily large integers
    (a float ``number ** 0.5`` loses precision beyond 2**52).
    """
    root = isqrt(number)
    return number == root * root


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return x + y + z as a fully reduced fraction (numerator, denominator)."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Sum s(x, y, z) = x + y + z over all distinct triples of proper
    fractions (denominators <= *order*) satisfying x**n + y**n = z**n for
    n in {1, 2, -1, -2}, and return numerator + denominator of the total.
    """
    unique_s: set[tuple[int, int]] = set()
    total = Fraction(0)

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n = 1: x + y = z
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(add_three(x_num, x_den, y_num, y_den, z_num, z_den))

                    # n = 2: x^2 + y^2 = z^2 (both parts must be perfect squares)
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = isqrt(z_num)
                        z_den = isqrt(z_den)
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(add_three(x_num, x_den, y_num, y_den, z_num, z_den))

                    # n = -1: 1/x + 1/y = 1/z
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(add_three(x_num, x_den, y_num, y_den, z_num, z_den))

                    # n = -2: 1/x^2 + 1/y^2 = 1/z^2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = isqrt(z_num)
                        z_den = isqrt(z_den)
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(add_three(x_num, x_den, y_num, y_den, z_num, z_den))

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
| 169
| 0
|
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def a__() -> None:
    """Check kruskal() against a hand-verified minimum spanning tree of a
    9-node weighted graph (the classic CLRS example)."""
    num_nodes = 9
    # Edges as [u, v, weight].
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # Kruskal may emit the MST edges in any order; compare as multisets.
    assert sorted(expected) == sorted(result)
| 75
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
        '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
        '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
        '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
        '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
        '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
        '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
        '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
        '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
        '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
        '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
        '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
        '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
        '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
        '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
        '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/bart-base''': 1_0_2_4,
    '''facebook/bart-large''': 1_0_2_4,
    '''facebook/bart-large-mnli''': 1_0_2_4,
    '''facebook/bart-large-cnn''': 1_0_2_4,
    '''facebook/bart-large-xsum''': 1_0_2_4,
    '''yjernite/bart_eli5''': 1_0_2_4,
}
class lowerCamelCase_ ( PreTrainedTokenizerFast ):
    """Fast (HF *tokenizers*-backed) BART tokenizer, byte-level BPE.

    Mirrors the slow BartTokenizer; additionally keeps the backend
    pre-tokenizer and post-processor consistent with ``add_prefix_space`` /
    ``trim_offsets``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Keep the backend pre-tokenizer in sync with add_prefix_space.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('''type'''))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state['''sep'''])
            if "cls" in state:
                state["cls"] = tuple(state['''cls'''])

            changes_to_apply = False
            if state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''', trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('''type'''))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) when it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it absorbs the
        # space before it (lstrip=True).
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''')
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''')
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Single sequence: <s> X </s>; pair: <s> A </s> </s> B </s>
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # BART does not use token type ids; return an all-zero mask of the
        # full special-tokens-included length.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 75
| 1
|
"""simple docstring"""
def UpperCAmelCase ( number: int ) -> int:
    """Return the *number*-th Catalan number, 1-indexed (C(1) = C(2) = 1).

    Uses the recurrence C(i) = C(i-1) * (4i - 2) // (i + 1), which always
    divides exactly, so the computation stays in integer arithmetic.

    Raises:
        TypeError: if *number* is not an int.
        ValueError: if *number* < 1.
    """
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 1:
        msg = f'Input value of [number={number}] must be > 0'
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 703
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure: module name -> public names exported from it.
_import_structure = {
    '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
    '''tokenization_mvp''': ['''MvpTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ['''MvpTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MvpForCausalLM''',
        '''MvpForConditionalGeneration''',
        '''MvpForQuestionAnswering''',
        '''MvpForSequenceClassification''',
        '''MvpModel''',
        '''MvpPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24
| 0
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet files."""

    # Rows per yielded Arrow table.
    batch_size: int = 1_0_0_0_0
    # Optional subset of columns to read; must match the declared features.
    columns: Optional[List[str]] = None
    # Optional explicit feature schema; inferred from the files when None.
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    """Arrow-based builder that streams Parquet files in record batches."""

    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle string, list and dict data_files."""
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            # A column selection must match the declared feature set exactly.
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F"""{file_idx}_{batch_idx}""", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(F"""Failed to read file '{file}' with error {type(e)}: {e}""")
                    raise
| 159
|
"""simple docstring"""
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """Return True when *positive_integer* is a "perfect" partition value,
    i.e. when log2(sqrt(4n + 1) / 2 + 1 / 2) is an integer (equivalently
    n = 2**e * (2**e - 1) for some integer e)."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    """Return the first partition candidate at which the running proportion
    of perfect partitions drops strictly below *max_proportion*."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        # A candidate exists only when (integer**2 - 1) is divisible by 4.
        partition_candidate = (integer**2 - 1) / 4
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(F'{solution() = }')
| 159
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure: module name -> public names exported from it.
_import_structure = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        """UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """UniSpeechForCTC""",
        """UniSpeechForPreTraining""",
        """UniSpeechForSequenceClassification""",
        """UniSpeechModel""",
        """UniSpeechPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so torch loads on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 531
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """huggingface/informer-tourism-monthly""": (
        """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class lowerCAmelCase_ ( lowerCamelCase__ ):
    """Configuration class for an Informer time-series transformer.

    Holds the time-series-specific settings (prediction/context lengths,
    lags, static/dynamic feature counts) plus the encoder/decoder
    transformer hyper-parameters and the Informer-specific ProbSparse
    attention options.
    """

    # Required by the PretrainedConfig machinery; the original block assigned
    # both values to the same placeholder name, losing the first one.
    model_type = "informer"
    # Map generic HF attribute names onto this config's field names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        # NOTE(review): parameter names restored from the upstream
        # InformerConfig signature; the original had every parameter named
        # identically (a duplicate-argument SyntaxError) while the body
        # referenced the real names. Defaults below match the original's.
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=None,
        scaling="mean",
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        is_encoder_decoder=True,
        activation_function="gelu",
        dropout=0.05,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        attention_type="prob",
        sampling_factor=5,
        distil=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            self.embedding_dimension = embedding_dimension
        else:
            # Standard heuristic: embedding dim grows with cardinality, capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # Renamed from a placeholder: __init__ references self._number_of_features.
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 531
| 1
|
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # PIL is unavailable: provide a minimal stand-in so references to
    # `Image.open` in skipped tests don't fail at import time. The original
    # stub was named `a`, so it never actually substituted for `Image`.
    class Image:
        """Minimal `PIL.Image` placeholder used when vision extras are absent."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class a (unittest.TestCase ):
    """Pipeline tests for zero-shot object detection.

    The original block had every method named ``__snake_case``, so each
    definition clobbered the previous one and unittest never collected the
    ``test_*`` methods; canonical names are restored below. All expected
    literals are preserved unchanged.
    """

    # Model classes eligible for this pipeline task.
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a tiny pipeline plus example inputs for the shared pipeline test suite."""
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        """Smoke-test: every detection carries score/label/box fields of the right types."""
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
| 81
|
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    """CLIP vision encoder + mapper producing conditioning states for Paint-by-Example.

    Restored the class name and the `CLIPPreTrainedModel` base (the original
    used an undefined placeholder base and a name shadowed by the class below).
    """

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        """Encode images to projected latent states; optionally also return the
        learned unconditional vector (used for classifier-free guidance)."""
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states
class PaintByExampleMapper(nn.Module):
    """Small transformer stack mapping CLIP pooled embeddings to conditioning states.

    Restored name: the image-encoder class above instantiates
    `PaintByExampleMapper(config)`, which was undefined under the original
    placeholder class name.
    """

    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                # NOTE(review): positional args (dim, n_heads, head_dim) restored
                # from upstream diffusers; the original collapsed all three into
                # one placeholder — confirm against BasicTransformerBlock.
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn='''gelu''', attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        """Apply each transformer block in sequence."""
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
| 432
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    """Convert a GPTSAN TensorFlow checkpoint into a PyTorch state dict.

    Reads `parameters.json` and the TF checkpoint from ``args.tf_model_dir``,
    remaps each tensor onto the HF GPTSAN parameter names, and saves the
    resulting OrderedDict with ``torch.save`` to ``args.output``.

    Restored: the original block assigned every local to the same placeholder
    name while the body read `vnp`, `player`, `nlayer`, `state*`, etc.
    (NameErrors), and the function name did not match the call site below.
    """
    parameter_file = os.path.join(args.tf_model_dir, 'parameters.json')
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."""
        )
    if not args.output.endswith('.pt'):
        args.output = args.output + '.pt'
    new_state = OrderedDict()
    with tf.device('/CPU:0'):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith('/adam_m') or key_name.endswith('/adam_v'):
                # Skip Adam optimizer slots; only model weights are converted.
                continue
            if key_name.startswith('pasts/'):
                if key_name.startswith('pasts/mlp'):
                    player = int(key_name[9])
                elif key_name.startswith('pasts/out'):
                    player = 8
                name = 'model.sqout.%d.weight' % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/moe'):
                player = int(key_name[9:].split('/')[0])
                if key_name.endswith('/switch_gating/kernel'):
                    name = 'model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/softmlp/kernel'):
                    name = 'model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/wo/kernel') or key_name.endswith('/wi/kernel'):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = 'model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/mlp'):
                player = int(key_name[9:].split('/')[0])
                if key_name.endswith('/p1/kernel'):
                    name = 'model.blocks.%d.feed_forward.mlp.wi.weight' % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/p1/bias'):
                    name = 'model.blocks.%d.feed_forward.mlp.wi.bias' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/p2/kernel'):
                    name = 'model.blocks.%d.feed_forward.mlp.wo.weight' % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/p2/bias'):
                    name = 'model.blocks.%d.feed_forward.mlp.wo.bias' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/ln'):
                player = int(key_name[8:].split('/')[0])
                if key_name.endswith('/b'):
                    name = 'model.blocks.%d.feed_forward.norm.bias' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/g'):
                    name = 'model.blocks.%d.feed_forward.norm.weight' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/att'):
                player = int(key_name[9:].split('/')[0])
                if key_name.endswith('/qkv/kernel'):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = 'model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
                    new_state[name] = torch.tensor(state_q)
                    name = 'model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
                    new_state[name] = torch.tensor(state_k)
                    name = 'model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith('/o/kernel'):
                    name = 'model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/an'):
                player = int(key_name[8:].split('/')[0])
                if key_name.endswith('/b'):
                    name = 'model.blocks.%d.self_attn.norm.bias' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/g'):
                    name = 'model.blocks.%d.self_attn.norm.weight' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith('model/wte')
                or key_name.startswith('model/wpe')
                or key_name.startswith('model/ete')
            ):
                nlayer = {'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
                    key_name[-3:]
                ]
                name = 'model.%s.weight' % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith('model/wte'):
                    # Token embeddings are tied to the LM head.
                    name = 'lm_head.weight'
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/wob'):
                name = 'final_logits_bias'
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = 'model.last_project.weight'
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = 'model.last_project.bias'
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    # CLI entry point: --tf_model_dir is the GPTSAN TF checkpoint directory,
    # --output is the destination .pt file. The original assigned the parser
    # and parsed args to a placeholder name, so `parser` and `args` were
    # undefined at use.
    parser = argparse.ArgumentParser(
        description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
    parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
| 718
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint to a PyTorch model directory.

    Restored: the function name now matches the call in the __main__ block,
    and the three parameters carry their real names instead of duplicate
    placeholders.
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point for the T5 TF->PyTorch conversion. The original
    # assigned the parser/args to a placeholder, leaving `args` undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 679
| 0
|
"""simple docstring"""
import os
def largest_product(grid):
    """Return the greatest product of four adjacent numbers in the grid,
    looking vertically, horizontally, and along both diagonals (Project
    Euler 11).

    Restored: the function name now matches the call in `solution`, and the
    outer loop iterates `range(n_columns)` instead of `range(grid)` (the
    original passed the grid itself to `range`, a TypeError).
    """
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    # Diagonal products persist across iterations when not recomputed at the
    # grid edges; they start at 0 so they never win before first assignment.
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product

    return largest
def solution() -> int:
    """Read the grid from ``grid.txt`` next to this file and return the
    greatest product of four adjacent numbers (Project Euler 11).

    Restored: function renamed to match the __main__ call, and the mangled
    `os.path.dirname(...)` / `int(...)` arguments now reference `__file__`
    and the cell value `i` as the surrounding code requires.
    """
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
| 438
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
    """Integration tests comparing XLM-R hidden states against fairseq reference values.

    Restored: both methods were named ``__a`` (the second clobbered the first
    and neither was collected by unittest), and the model/tensor locals were
    referenced through an undefined placeholder.
    """

    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 438
| 1
|
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
# Pre-create the output directories the converter functions below write into.
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor):
    """Convert a diffuser temporal UNet checkpoint for planning horizon `hor`
    (32 or 128) into diffusers format under hub/hopper-medium-v2/unet/hor{hor}.

    Restored: function renamed to match the __main__ call, the parameter is
    `hor` (the body read it under that name while the signature used a
    placeholder), and `json.dump` now writes the config to the open file.
    """
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(F"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNetaDModel(**config)
    print(F"length of state dict: {len(state_dict.keys() )}")
    print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}")
    # Rename source keys onto the HF model's keys (assumes identical ordering).
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), F"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(F"hub/hopper-medium-v2/unet/hor{hor}/config.json", """w""") as f:
        json.dump(config, f)
def value_function():
    """Convert the diffuser value-function checkpoint into diffusers format
    under hub/hopper-medium-v2/value_function.

    Restored: function renamed to match the __main__ call, the loaded module
    is unpacked via `.state_dict()` before key remapping (the original bound
    the raw module, which has no `.pop`), and `json.dump` writes the config.
    """
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""")
    state_dict = model.state_dict()
    hf_value_function = UNetaDModel(**config)
    print(F"length of state dict: {len(state_dict.keys() )}")
    print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}")

    # Rename source keys onto the HF model's keys (assumes identical ordering).
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""")
    with open("""hub/hopper-medium-v2/value_function/config.json""", """w""") as f:
        json.dump(config, f)
if __name__ == "__main__":
    # Convert the horizon-32 UNet and the value function; the horizon-128
    # UNet conversion is left disabled.
    unet(32)
    # unet(128)
    value_function()
| 178
|
'''simple docstring'''
UpperCamelCase__ : List[str] = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 178
| 1
|
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """Project Euler 94: sum of the perimeters of all almost-equilateral
    triangles (sides a, a, a±1) with integral side lengths and area, whose
    perimeter does not exceed ``max_perimeter``.

    Restored: the parameter was named with a placeholder while the loop
    condition read ``max_perimeter`` (a NameError). Verified against the
    known triangles (5,5,6) and (17,17,16): solution(100) == 66.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        # Pell-style recurrence generating the successive side values.
        prev_value += 2 * value
        value += prev_value

        # Alternate between the (a, a, a+1) and (a, a, a-1) families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 660
|
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowercase_ ( AbstractFileSystem ):
    """Read-only fsspec filesystem over a legacy Hugging Face Hub dataset repo.

    Restored: the base class is the imported `AbstractFileSystem` (the
    original used an undefined placeholder), and the two fsspec class
    attributes `root_marker` / `protocol` were both assigned to the same
    placeholder name, losing the first.
    """

    root_marker = """"""
    protocol = """hf-legacy"""  # "hf://"" is reserved for hffs
def __init__( self , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Tuple:
"""simple docstring"""
super().__init__(self , **UpperCamelCase__ )
UpperCAmelCase_ = repo_info
UpperCAmelCase_ = token
UpperCAmelCase_ = None
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
if self.dir_cache is None:
UpperCAmelCase_ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase_ = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(UpperCamelCase__ ): {"name": str(UpperCamelCase__ ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = "rb" , **UpperCamelCase__ , ) -> Optional[int]:
"""simple docstring"""
if not isinstance(self.repo_info , UpperCamelCase__ ):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
UpperCAmelCase_ = hf_hub_url(self.repo_info.id , UpperCamelCase__ , revision=self.repo_info.sha )
return fsspec.open(
UpperCamelCase__ , mode=UpperCamelCase__ , headers=get_authentication_headers_for_url(UpperCamelCase__ , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCamelCase_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
self._get_dirs()
UpperCAmelCase_ = self._strip_protocol(UpperCamelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__=False , **UpperCamelCase__ ) -> str:
"""simple docstring"""
self._get_dirs()
UpperCAmelCase_ = PurePosixPath(path.strip("/" ) )
UpperCAmelCase_ = {}
for p, f in self.dir_cache.items():
UpperCAmelCase_ = PurePosixPath(p.strip("/" ) )
UpperCAmelCase_ = p.parent
if root == path:
UpperCAmelCase_ = f
UpperCAmelCase_ = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660
| 1
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
# Module-level logger; the `logger.info(...)` calls in this file depend on this name.
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single HANS train/dev example.

    Field names follow the keyword arguments used at the ``InputExample(...)``
    call site in this file.

    Attributes:
        guid: unique id for the example.
        text_a: first (premise) sequence.
        text_b: second (hypothesis) sequence.
        label: label string, for train/dev examples.
        pairID: HANS pair id, used when writing predictions.
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """Tokenized features for one HANS example.

    Field names follow the ``InputFeatures(**inputs, label=..., pairID=...)``
    call site in this file (``inputs`` being a tokenizer encoding).

    Attributes:
        input_ids: token ids.
        attention_mask: attention mask (1 = attend, 0 = padding).
        token_type_ids: segment ids.
        label: integer class label.
        pairID: integer HANS pair id.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
    """PyTorch dataset of HANS features, tokenized once and cached on disk.

    The original definition had duplicate (mangled) parameter names — a
    SyntaxError — while its body still used the real names (``task``,
    ``tokenizer``, ``data_dir``, ``evaluate``, ``overwrite_cache``); the
    signature is restored to match the body.
    """

    features: List[InputFeatures]

    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()
        # Cache file name encodes split, tokenizer class, max length and task.
        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train",
                tokenizer.__class__.__name__,
                str(max_seq_length),
                task,
            ),
        )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = (
                    processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                )
                logger.info("Training examples: %s", len(examples))
                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(self.features, cached_features_file)

    def __len__(self):
        """Number of tokenized examples."""
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        """Return the i-th feature set."""
        return self.features[i]

    def get_labels(self):
        """Return the (possibly swapped) label list."""
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    """TensorFlow dataset of HANS features, exposed as a ``tf.data.Dataset``.

    The original definition had duplicate (mangled) parameter names — a
    SyntaxError — while its body still used the real names; the signature is
    restored to match the body.
    """

    features: List[InputFeatures]

    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = 128,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
        self.features = hans_convert_examples_to_features(examples, self.label_list, max_seq_length, tokenizer)

        def gen():
            # Yields (inputs_dict, label) pairs for tf.data.Dataset.from_generator.
            for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                if ex_index % 10000 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        # NOTE(review): the mangled source showed one dtype (`tf.intaa`) for every
        # field; int32 inputs / int64 labels restored per the upstream script — confirm.
        self.dataset = tf.data.Dataset.from_generator(
            gen,
            (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ),
            (
                {
                    "example_id": tf.TensorShape([]),
                    "input_ids": tf.TensorShape([None, None]),
                    "attention_mask": tf.TensorShape([None, None]),
                    "token_type_ids": tf.TensorShape([None, None]),
                },
                tf.TensorShape([]),
            ),
        )

    def get_dataset(self):
        """Return the underlying ``tf.data.Dataset``."""
        return self.dataset

    def __len__(self):
        """Number of tokenized examples."""
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        """Return the i-th feature set."""
        return self.features[i]

    def get_labels(self):
        """Return the (possibly swapped) label list."""
        return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set (TSV splits; ``_read_tsv`` comes from
    the ``DataProcessor`` base class).

    Restored name matches the ``hans_processors`` registry entry in this file;
    the mangled version gave every method the same (colliding) name.
    """

    def get_train_examples(self, data_dir):
        """See base class; reads ``heuristics_train_set.txt``."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class; reads ``heuristics_evaluation_set.txt``."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """Return the three MNLI-style labels used for HANS."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Create ``InputExample``s from TSV rows, skipping the header row."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            # Some rows prefix the label with "ex"; strip it.
            label = line[7][2:] if line[7].startswith("ex") else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """Convert ``InputExample``s into ``InputFeatures`` via the tokenizer.

    Restored name matches the call sites in the dataset classes above.

    Args:
        examples: input examples to encode.
        label_list: valid label strings; unknown labels map to index 0.
        max_length: maximum sequence length (padding/truncation target).
        tokenizer: HF tokenizer used to encode the sentence pairs.

    Returns:
        list of ``InputFeatures``.
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        # NOTE(review): the boolean kwargs were mangled in the source; values
        # restored per the upstream HANS utilities — confirm.
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        # Unknown labels (e.g. HANS "non-entailment") fall back to index 0.
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
# Number of classification labels per task.
hans_tasks_num_labels = {
    "hans": 3,
}
# Task-name -> processor registry; the dataset classes above look tasks up
# here via ``hans_processors[task]()``, so the name must be `hans_processors`.
hans_processors = {
    "hans": HansProcessor,
}
| 714
|
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger (transformers' logging utility); the functions below
# reference it as `logger`, so the name is restored.
logger = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    """Load the original MobileViTV2 YAML config into a flat
    ``argparse.Namespace`` whose attribute names are dot-joined key paths
    (e.g. ``model.classification.name``).

    Restored name matches the ``load_orig_config_file(...)`` call site below.
    """
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings into {"a.b.c": leaf} items.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """Build a ``MobileViTVaConfig`` for ``task_name`` from the original YAML config.

    NOTE(review): the mangled source dropped every assignment target; the
    config attribute names below are restored from the upstream conversion
    script — confirm against it before shipping.
    """
    config = MobileViTVaConfig()
    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config,  'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label (standard PretrainedConfig attribute names)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place (the old key is removed)."""
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """Build (old_key, new_key) pairs translating original MobileViTV2
    checkpoint names into HF ``transformers`` names.

    Args:
        state_dict: the original checkpoint's state dict (only keys are read).
        base_model: if True, omit the "mobilevitv2." prefix on backbone keys.

    Returns:
        list of (source_key, destination_key) tuples.
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        # Strip a leading "encoder." before applying the rewrite rules.
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            # The trailing global_rep entry (index j+1) is the final layernorm.
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Drop auxiliary-head weights (``seg_head.aux_head.*``) from
    ``state_dict`` in place — the HF model has no auxiliary segmentation head."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """Download the standard COCO cats test image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Copy/paste/tweak an original MobileViTV2 checkpoint into the HF layout,
    sanity-check its outputs on a test image, and save model + image processor
    to ``pytorch_dump_folder_path``."""
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False  # intentionally False in both branches (full model keys keep their prefix)

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
    if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
        # expected_logits for base variant
        expected_logits = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01])
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point; the mangled source bound the parser and parsed args to a
    # throwaway name while using `parser`/`args` below — names restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
            "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 451
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
# Module-level logger (transformers' logging utility), named per convention.
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    """Deprecated alias of :class:`FlavaImageProcessor`; emits a
    ``FutureWarning`` on construction and otherwise behaves identically.
    (The class/base names were mangled; the name restored here is the one the
    deprecation message itself refers to.)"""

    def __init__(self, *args, **kwargs) -> None:
        # The mangled code passed the positional-args tuple as warnings.warn's
        # `category` argument; a deprecation shim must pass FutureWarning.
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 109
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger; DetaConfig.__init__ below calls `logger.info(...)`.
logger = logging.get_logger(__name__)

# Canonical config locations for released DETA checkpoints.
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class A ( __snake_case ):
    """Configuration class for the DETA detection model.

    NOTE(review): identifiers in this class are machine-mangled and the code is
    not runnable as written:
    - the two ``__magic_name__`` assignments shadow each other (upstream these
      are ``model_type`` and ``attribute_map``);
    - every ``__init__`` parameter shares the name ``SCREAMING_SNAKE_CASE``,
      which is a SyntaxError (duplicate argument), while the body references
      the original parameter names;
    - each ``A : type = value`` line rebinds one local instead of setting the
      instance attribute named on its right-hand side (upstream
      ``self.<name> = <name>``).
    Confirm against upstream ``DetaConfig`` before reconstructing the
    37-parameter signature.
    """

    __magic_name__ = '''deta'''
    __magic_name__ = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=900 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=8 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=1024 , SCREAMING_SNAKE_CASE=8 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="relu" , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="sine" , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=300 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.25 , **SCREAMING_SNAKE_CASE , ) -> Any:
        """Build the config; falls back to a default ResNet backbone config
        when none is given (see NOTE in the class docstring about mangling)."""
        # Default backbone: ResNet exposing stages 2-4 as features.
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
            A : Optional[Any] = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
        else:
            # Accept a plain dict and rebuild the proper config class from it.
            if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
                A : List[Any] = backbone_config.pop('''model_type''' )
                A : List[Any] = CONFIG_MAPPING[backbone_model_type]
                A : Optional[int] = config_class.from_dict(SCREAMING_SNAKE_CASE )
        A : str = backbone_config
        A : Optional[int] = num_queries
        A : Dict = max_position_embeddings
        A : Optional[Any] = d_model
        A : Optional[Any] = encoder_ffn_dim
        A : List[str] = encoder_layers
        A : Tuple = encoder_attention_heads
        A : Optional[Any] = decoder_ffn_dim
        A : Optional[int] = decoder_layers
        A : List[str] = decoder_attention_heads
        A : Union[str, Any] = dropout
        A : str = attention_dropout
        A : Any = activation_dropout
        A : Optional[int] = activation_function
        A : Tuple = init_std
        A : Any = init_xavier_std
        A : Optional[Any] = encoder_layerdrop
        A : int = auxiliary_loss
        A : Dict = position_embedding_type
        # deformable attributes
        A : str = num_feature_levels
        A : Optional[int] = encoder_n_points
        A : Any = decoder_n_points
        A : Tuple = two_stage
        A : Dict = two_stage_num_proposals
        A : List[str] = with_box_refine
        A : List[str] = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
        # Hungarian matcher
        A : Dict = class_cost
        A : Optional[int] = bbox_cost
        A : Optional[Any] = giou_cost
        # Loss coefficients
        A : int = mask_loss_coefficient
        A : int = dice_loss_coefficient
        A : Tuple = bbox_loss_coefficient
        A : int = giou_loss_coefficient
        A : Dict = eos_coefficient
        A : Optional[Any] = focal_alpha
        super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

    @property
    def __lowerCAmelCase ( self ) -> int:
        """Alias: attention heads of the encoder (upstream property name is
        ``num_attention_heads`` per the attribute_map above)."""
        return self.encoder_attention_heads

    @property
    def __lowerCAmelCase ( self ) -> int:
        """Alias: model width (upstream property name is ``hidden_size``;
        NOTE(review): this property shadows the previous one because of the
        mangled shared name)."""
        return self.d_model

    def __lowerCAmelCase ( self ) -> Any:
        """Serialize to a plain dict, expanding the nested backbone config
        (upstream method name is ``to_dict``)."""
        A : Any = copy.deepcopy(self.__dict__ )
        A : Dict = self.backbone_config.to_dict()
        A : List[Any] = self.__class__.model_type
        return output
| 634
| 0
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    """End-to-end mock test of ``send_file``: a fake socket accepts one
    connection, a fake file yields one chunk then EOF, and every expected
    socket/file call is asserted afterwards.

    The mangled version had duplicate parameter names (a SyntaxError) and
    dropped the mock-wiring assignment targets; both are restored. ``file``
    receives the ``builtins.open`` mock and ``sock`` the ``socket.socket``
    mock (innermost ``@patch`` is injected first).
    """
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])  # one data chunk, then EOF
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 704
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    """Builds a small random ``BertConfig`` and inputs for the Flax BERT test
    suite. Name restored to match the ``FlaxBertModelTester(self)`` reference
    in the test class below; the mangled version also had duplicate parameter
    names (a SyntaxError) and gave all three methods one colliding name."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) from random tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Return decoder-mode config plus encoder states/mask for cross-attention tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the common Flax model test-suite against the BERT model classes.

    Restored the hook/attribute names the framework requires: ``setUp`` and a
    ``test_``-prefixed method (otherwise never collected), plus the mixin-read
    class attributes (the mangled names were invisible to the mixin).
    """

    # NOTE(review): the original class attribute was a mangled `= True`;
    # restored per upstream as test_head_masking — confirm.
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 441
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; the config classes below call `logger.warning(...)`.
logger = logging.get_logger(__name__)

# Canonical config locations for released BridgeTower checkpoints.
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    """Configuration for the BridgeTower vision (ViT-style) encoder.

    The mangled version had duplicate parameter names (a SyntaxError) and
    dropped every ``self.*`` target; attribute names are restored from the
    right-hand sides of the original assignments. Class name restored from
    the ``model_type`` string below (all three config classes shared one
    mangled name and shadowed each other).
    """

    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load from a standalone repo, or extract the nested sub-config from a
        full ``bridgetower`` config."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            # NOTE(review): the source reads "text_config" here even though this
            # is the *vision* config — looks like a copy/paste slip for
            # "vision_config"; behavior preserved, confirm against upstream.
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    """Configuration for the BridgeTower text (RoBERTa-style) encoder.

    The mangled version had duplicate parameter names (a SyntaxError) and
    dropped every ``self.*`` target; attribute names are restored from the
    right-hand sides of the original assignments. Class name restored from
    the ``model_type`` string below.
    """

    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load from a standalone repo, or extract the nested ``text_config``
        from a full ``bridgetower`` config."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Union[str, Any] = '''bridgetower'''
def __init__( self, A=True, A="gelu", A=768, A=1, A=1E-05, A=False, A="add", A=12, A=6, A=False, A=False, A=None, A=None, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop('text_config_dict', A )
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('vision_config_dict', A )
super().__init__(**A )
SCREAMING_SNAKE_CASE : Union[str, Any] = share_cross_modal_transformer_layers
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_factor
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = share_link_tower_layers
SCREAMING_SNAKE_CASE : int = link_tower_type
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = tie_word_embeddings
SCREAMING_SNAKE_CASE : str = init_layernorm_from_vision_encoder
if text_config is None:
SCREAMING_SNAKE_CASE : int = {}
logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.' )
if vision_config is None:
SCREAMING_SNAKE_CASE : int = {}
logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.' )
SCREAMING_SNAKE_CASE : List[str] = BridgeTowerTextConfig(**A )
SCREAMING_SNAKE_CASE : Tuple = BridgeTowerVisionConfig(**A )
@classmethod
def UpperCamelCase_ ( cls, A, A, **A ):
'''simple docstring'''
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : List[str] = self.text_config.to_dict()
SCREAMING_SNAKE_CASE : Tuple = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE : List[Any] = self.__class__.model_type
return output
| 28
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE_ = False
class lowerCAmelCase ( unittest.TestCase ):
    """Placeholder for fast (non-GPU) VersatileDiffusionImageVariationPipeline
    tests; none are defined yet, so the body is intentionally empty."""

    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineSlowTests(unittest.TestCase):
    """Slow GPU integration test for VersatileDiffusionImageVariationPipeline.

    Fix: the class was scrambled to `lowerCAmelCase`, shadowing the placeholder
    class of the same name above, and the method body used the undefined
    placeholder `_A` everywhere (NameError).  The intended device/flag/argument
    names are restored; the method is renamed to a `test_*` name so unittest
    discovers it.
    """

    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        # Tolerant comparison: diffusion outputs differ slightly across hardware.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 597
| 0
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowercase__ ( unittest.TestCase ):
    """Tests for BarkProcessor: save/load round-trips, voice presets, tokenization.

    Fix: every method was scrambled to the single name `lowerCamelCase_`, so
    later definitions shadowed earlier ones — `self.get_tokenizer` did not
    exist, the fixtures were never set up, and unittest discovered no tests.
    The conventional method names are restored from the call sites and
    attribute reads.
    """

    def setUp(self):
        # Shared fixtures, read by every test below.
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'

    def get_tokenizer(self, **kwargs):
        """Return the checkpoint's tokenizer, forwarding any tokenizer kwargs."""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        """A processor saved then reloaded keeps the same tokenizer vocab."""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        """Reloading with extra tokenizer kwargs (custom BOS/EOS) is honored."""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        processor.save_pretrained(
            self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory, )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token='(BOS)', eos_token='(EOS)', )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        """Voice presets can be given in-memory, as an .npz file, or by hub name."""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            'semantic_prompt': np.ones(seq_len),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len)),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, 'file.npz')
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        """Processor output matches a direct tokenizer call, key by key."""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        # NOTE(review): the boolean flags below were scrambled in the source;
        # False/True/False matches the upstream Bark processor test — confirm.
        encoded_tok = tokenizer(
            self.input_string, padding='max_length', max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 24
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    """Deprecated alias of `YolosImageProcessor`, kept for backward compatibility.

    Fixes: the scrambled signature `*snake_case, **snake_case` reused one
    parameter name (a SyntaxError) and then passed that bogus name as the
    warning category; the base class `A` was undefined — the warning text
    itself names the intended replacement class.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 24
| 1
|
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    """Base class for readers that materialize on-disk data into a `Dataset`.

    Fixes: all parameters were scrambled to the duplicate name `_snake_case`
    (a SyntaxError) and the base class `lowerCAmelCase_` was undefined
    (`ABC` is imported at the top of the file); intended names are restored
    from the attribute assignments.
    """

    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        self.path_or_paths = path_or_paths
        # NOTE(review): the isinstance operands were scrambled; upstream
        # keeps `split=None` when paths are given per-split as a dict,
        # otherwise defaults to the "train" split — confirm.
        self.split = split if split or isinstance(path_or_paths, dict) else 'train'
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        """Materialize and return the dataset."""
        pass
class AbstractDatasetInputStream(ABC):
    """Base class for readers that build a `Dataset` from an in-memory stream.

    Fixes: duplicate scrambled parameter names (`_snake_case`) were a
    SyntaxError, the undefined base `lowerCAmelCase_` is replaced by `ABC`,
    and the class no longer shares a scrambled name with the reader class
    above (which it shadowed at module level).
    """

    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        """Materialize and return the dataset."""
        pass
| 478
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size policy: above MAX_GPU_BATCH_SIZE the script falls back to
# gradient accumulation; evaluation always uses EVAL_BATCH_SIZE.
# Fix: both constants were scrambled to the same name `a_` and all three
# functions below to `lowercase_`, so every cross-reference (including the
# `main()` call in the entry guard) raised NameError; the intended names
# are restored from the call sites.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build GLUE/MRPC train and eval dataloaders tokenized with bert-base-cased.

    Args:
        accelerator: the `Accelerator` coordinating (distributed) execution.
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader


# For testing only: swap in lightweight mocked dataloaders.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train and evaluate BERT on MRPC, with correct distributed metric handling.

    Args:
        config: dict with 'lr', 'num_epochs', 'seed' and 'batch_size'.
        args: parsed CLI namespace (provides `cpu` and `mixed_precision`).
    """
    # For testing only: shorten the run.
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch['labels']))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''', eval_metric)


def main():
    """Parse CLI args and launch training."""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 478
| 1
|
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers 1, 2, 3, 5, 8, ... indefinitely.

    Note: the swap happens before the first yield, so the classic second 1
    is skipped; the index arithmetic in `solution` accounts for this.
    """
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term containing `n` digits.

    (Project Euler problem 25.)  Fix: both functions were scrambled to the
    same name `lowerCamelCase`, so this call raised NameError; the real
    names are restored.
    """
    answer = 1
    fib = fibonacci_generator()
    while len(str(next(fib))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 191
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce input into a list of videos (each a list of frames).

    Accepts a batch of videos, a single video, or a single image.  Fix: the
    function was scrambled to `lowerCamelCase`, breaking the
    `make_batched(...)` call in `preprocess` below.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    """Video image processor: resize, center-crop, rescale (with optional
    offset to [-1, 1]) and normalize each frame of each video.

    Fix: all methods were scrambled to the single mangled name
    `__UpperCamelCase` (so `self.resize`, `self._preprocess_image` etc. did
    not exist) and every parameter to the duplicate name `_a` — a
    SyntaxError.  Names are restored from the internal call sites.
    NOTE(review): the class/base names were scrambled too; `VivitImageProcessor`
    / `BaseImageProcessor` are inferred from the imports — confirm.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize to `size` (either shortest_edge or explicit height/width)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size['shortest_edge'], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to an explicit height/width."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values; with `offset` the mid-point is shifted first.

        NOTE(review): `np.floataa` in the scrambled source is assumed to be
        np.float32 — confirm.
        """
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transform pipeline to one frame."""
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        if offset and not do_rescale:
            raise ValueError('For offset, do_rescale must also be set to True.')
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess a batch of videos, returning a `BatchFeature` of pixel values.

        Any argument left as None falls back to the value configured in __init__.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        if not valid_images(videos):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]
        data = {'pixel_values': videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 191
| 1
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = BertJapaneseTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = True
    def _snake_case ( self : Dict ):
        """Write a tiny Japanese wordpiece vocab file into the test temp dir.

        NOTE(review): every method in this class shares the scrambled name
        `_snake_case`, so later definitions shadow earlier ones; presumably
        this was unittest's `setUp` — confirm against the upstream test file.
        Likewise the bare `SCREAMING_SNAKE_CASE = ...` targets look like lost
        names (`vocab_tokens` and `self.vocab_file` are read below but never
        visibly assigned).
        """
        super().setUp()
        # Specials, whole words, and "##"-prefixed continuation pieces.
        SCREAMING_SNAKE_CASE = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]
        SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def _snake_case ( self : Tuple , __lowerCamelCase : List[Any] ):
        """Return an (input text, expected detokenized text) pair for round-trips.

        NOTE(review): `input_text`/`output_text` are returned but the scrambled
        assignments target `SCREAMING_SNAKE_CASE` — names lost in obfuscation.
        """
        SCREAMING_SNAKE_CASE = "こんにちは、世界。 \nこんばんは、世界。"
        SCREAMING_SNAKE_CASE = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text
    def _snake_case ( self : str , __lowerCamelCase : Any ):
        """Encode/decode the sample pair with the given tokenizer; return (text, ids).

        NOTE(review): `tokenizer`, `text`, `ids` are read but the scrambled
        assignments target `SCREAMING_SNAKE_CASE` — names lost in obfuscation.
        """
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_input_output_texts(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
        return text, ids
    def _snake_case ( self : Optional[int] ):
        """Intentionally empty common-test override (see TODO)."""
        pass  # TODO add if relevant
    def _snake_case ( self : List[Any] ):
        """Intentionally empty common-test override (see TODO)."""
        pass  # TODO add if relevant
    def _snake_case ( self : List[Any] ):
        """Intentionally empty common-test override (see TODO)."""
        pass  # TODO add if relevant
    def _snake_case ( self : Union[str, Any] ):
        """Full tokenizer: surface tokenization and token->id conversion."""
        SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
        SCREAMING_SNAKE_CASE = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
        self.assertListEqual(__lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
    def _snake_case ( self : Tuple ):
        """MeCab-backed tokenizer tokenizes correctly and survives a pickle round-trip."""
        SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
        self.assertIsNotNone(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = "こんにちは、世界。\nこんばんは、世界。"
        SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        # Pickle and reload, then check tokenization is unchanged.
        SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(__lowerCamelCase , "wb" ) as handle:
            pickle.dump(__lowerCamelCase , __lowerCamelCase )
        with open(__lowerCamelCase , "rb" ) as handle:
            SCREAMING_SNAKE_CASE = pickle.load(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
    def _snake_case ( self : Optional[Any] ):
        """MecabTokenizer with the ipadic dictionary."""
        SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def _snake_case ( self : int ):
        """MecabTokenizer with unidic_lite; silently skipped if the dict is absent."""
        try:
            SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="unidic_lite" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def _snake_case ( self : Optional[Any] ):
        """MecabTokenizer with unidic; silently skipped if the dict is absent."""
        try:
            SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="unidic" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def _snake_case ( self : Any ):
        """Lower-casing applies to Latin text only ("iphone").

        NOTE(review): `do_lower_case=__lowerCamelCase` references a name that
        is not a parameter of this method — presumably the scrambled form of
        `True`; confirm against the upstream test.
        """
        SCREAMING_SNAKE_CASE = MecabTokenizer(do_lower_case=__lowerCamelCase , mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def _snake_case ( self : Union[str, Any] ):
        """MecabTokenizer pointed at a jumandic install; skipped if absent."""
        try:
            SCREAMING_SNAKE_CASE = MecabTokenizer(
                do_lower_case=__lowerCamelCase , normalize_text=__lowerCamelCase , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
    def _snake_case ( self : Union[str, Any] ):
        """With text normalization disabled, the full-width space token survives."""
        SCREAMING_SNAKE_CASE = MecabTokenizer(normalize_text=__lowerCamelCase , mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
    @require_sudachi
    def _snake_case ( self : List[Any] ):
        """Sudachi-backed tokenizer: tokenization, ids, and a pickle round-trip."""
        SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
        self.assertIsNotNone(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = "こんにちは、世界。\nこんばんは、世界。"
        SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        # Pickle and reload, then check tokenization is unchanged.
        SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(__lowerCamelCase , "wb" ) as handle:
            pickle.dump(__lowerCamelCase , __lowerCamelCase )
        with open(__lowerCamelCase , "rb" ) as handle:
            SCREAMING_SNAKE_CASE = pickle.load(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@require_sudachi
def _snake_case ( self : List[str] ):
    # Bare SudachiTokenizer keeps whitespace tokens (tabs/newlines/spaces)
    # when trim_whitespace is left at its default.
    SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="core" )
    self.assertListEqual(
        tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _snake_case ( self : int ):
    # Split mode A: finest-grained segmentation.
    SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
    self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def _snake_case ( self : Tuple ):
    # Split mode B: intermediate granularity.
    SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
    self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def _snake_case ( self : int ):
    # Split mode C: coarsest segmentation (whole compound kept as one token).
    SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
    self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def _snake_case ( self : Optional[int] ):
    # do_lower_case folds ASCII ("iPhone" -> "iphone"); whitespace retained.
    SCREAMING_SNAKE_CASE = SudachiTokenizer(do_lower_case=__lowerCamelCase , sudachi_dict_type="core" )
    self.assertListEqual(
        tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _snake_case ( self : Optional[int] ):
    # normalize_text disabled: ideographic space U+3000 is preserved verbatim.
    SCREAMING_SNAKE_CASE = SudachiTokenizer(normalize_text=__lowerCamelCase , sudachi_dict_type="core" )
    self.assertListEqual(
        tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def _snake_case ( self : List[str] ):
    # trim_whitespace drops all whitespace-only tokens from the output.
    SCREAMING_SNAKE_CASE = SudachiTokenizer(trim_whitespace=__lowerCamelCase , sudachi_dict_type="core" )
    self.assertListEqual(
        tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def _snake_case ( self : Tuple ):
    # Juman++-backed BertJapaneseTokenizer: tokenize, convert to ids, and
    # verify tokenization is identical after a pickle round-trip.
    SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
    self.assertIsNotNone(__lowerCamelCase )
    SCREAMING_SNAKE_CASE = "こんにちは、世界。\nこんばんは、世界。"
    SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase )
    self.assertListEqual(__lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
    self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
    SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , "tokenizer.bin" )
    with open(__lowerCamelCase , "wb" ) as handle:
        pickle.dump(__lowerCamelCase , __lowerCamelCase )
    with open(__lowerCamelCase , "rb" ) as handle:
        SCREAMING_SNAKE_CASE = pickle.load(__lowerCamelCase )
    SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(__lowerCamelCase )
    self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@require_jumanpp
def _snake_case ( self : Optional[int] ):
    # Plain JumanppTokenizer keeps U+3000 whitespace tokens by default.
    SCREAMING_SNAKE_CASE = JumanppTokenizer()
    self.assertListEqual(
        tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _snake_case ( self : List[str] ):
    # do_lower_case folds ASCII ("iPhone" -> "iphone").
    SCREAMING_SNAKE_CASE = JumanppTokenizer(do_lower_case=__lowerCamelCase )
    self.assertListEqual(
        tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _snake_case ( self : Optional[Any] ):
    # normalize_text disabled: half-width katakana is not NFKC-folded, so
    # "アップル" decomposes into half-width characters.
    SCREAMING_SNAKE_CASE = JumanppTokenizer(normalize_text=__lowerCamelCase )
    self.assertListEqual(
        tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _snake_case ( self : Dict ):
    # trim_whitespace drops all whitespace-only tokens from the output.
    SCREAMING_SNAKE_CASE = JumanppTokenizer(trim_whitespace=__lowerCamelCase )
    self.assertListEqual(
        tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def _snake_case ( self : List[Any] ):
    # Juman++ recognizes the kaomoji "m(_ _)m" as a single token.
    SCREAMING_SNAKE_CASE = JumanppTokenizer()
    self.assertListEqual(
        tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def _snake_case ( self : int ):
    # WordpieceTokenizer on a tiny vocab: exact match, greedy "##"-prefixed
    # subword split, and [UNK] fallback for out-of-vocabulary words.
    SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
    # NOTE(review): loop target below is obfuscated; presumably it builds
    # vocab[token] = i -- verify against the original test.
    SCREAMING_SNAKE_CASE = {}
    for i, token in enumerate(__lowerCamelCase ):
        SCREAMING_SNAKE_CASE = i
    SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=__lowerCamelCase , unk_token="[UNK]" )
    self.assertListEqual(tokenizer.tokenize("" ) , [] )
    self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
    self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
    self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def _snake_case ( self : Any ):
    # SentencePiece subword tokenizer of the auto-jumanpp checkpoint:
    # "▁" marks word boundaries in the output pieces.
    SCREAMING_SNAKE_CASE = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
    SCREAMING_SNAKE_CASE = tokenizer.subword_tokenizer
    SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
    self.assertListEqual(__lowerCamelCase , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
    SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
    self.assertListEqual(__lowerCamelCase , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def _snake_case ( self : Optional[Any] ):
    # build_inputs_with_special_tokens wraps single and paired sequences with
    # [CLS] (id 2) and [SEP] (id 3) in the BERT layout.
    SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
    SCREAMING_SNAKE_CASE = tokenizer.encode("ありがとう。" , add_special_tokens=__lowerCamelCase )
    SCREAMING_SNAKE_CASE = tokenizer.encode("どういたしまして。" , add_special_tokens=__lowerCamelCase )
    SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
    SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
    # 2 is for "[CLS]", 3 is for "[SEP]"
    assert encoded_sentence == [2] + text + [3]
    assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ):
    '''simple docstring'''

    # Tests BertJapaneseTokenizer with the character-level subword tokenizer.
    lowerCamelCase__ = BertJapaneseTokenizer
    lowerCamelCase__ = False

    def _snake_case ( self : Optional[Any] ):
        # Write a minimal character vocab file for the tests below.
        super().setUp()
        SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def _snake_case ( self : str , **__lowerCamelCase : Optional[int] ):
        # Tokenizer factory used by the shared tokenizer test mixin.
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **__lowerCamelCase )

    def _snake_case ( self : Dict , __lowerCamelCase : Optional[int] ):
        # (input, expected output) pair for the mixin's round-trip checks.
        SCREAMING_SNAKE_CASE = "こんにちは、世界。 \nこんばんは、世界。"
        SCREAMING_SNAKE_CASE = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def _snake_case ( self : Tuple ):
        pass  # TODO add if relevant

    def _snake_case ( self : Tuple ):
        pass  # TODO add if relevant

    def _snake_case ( self : Any ):
        pass  # TODO add if relevant

    def _snake_case ( self : Tuple ):
        # Full tokenizer splits into single characters and maps them to ids.
        SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
        SCREAMING_SNAKE_CASE = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
        self.assertListEqual(
            __lowerCamelCase , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )

    def _snake_case ( self : Optional[int] ):
        # CharacterTokenizer in isolation: per-character split, [UNK] fallback.
        # NOTE(review): the loop target below is obfuscated; presumably it
        # builds vocab[token] = i -- verify against the original test.
        SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        SCREAMING_SNAKE_CASE = {}
        for i, token in enumerate(__lowerCamelCase ):
            SCREAMING_SNAKE_CASE = i
        SCREAMING_SNAKE_CASE = CharacterTokenizer(vocab=__lowerCamelCase , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
        self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )

    def _snake_case ( self : Tuple ):
        # Special-token wrapping for the character checkpoint: [CLS]=2, [SEP]=3.
        SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
        SCREAMING_SNAKE_CASE = tokenizer.encode("ありがとう。" , add_special_tokens=__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.encode("どういたしまして。" , add_special_tokens=__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''

    # AutoTokenizer should resolve a Japanese BERT checkpoint to the
    # BertJapaneseTokenizer class.
    def _snake_case ( self : str ):
        SCREAMING_SNAKE_CASE = "cl-tohoku/bert-base-japanese"
        SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(__lowerCamelCase )
        self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''

    # Loading a checkpoint with a mismatched tokenizer class should log a
    # warning in both directions (BertTokenizer <-> BertJapaneseTokenizer).
    def _snake_case ( self : Dict ):
        SCREAMING_SNAKE_CASE = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers" , level="WARNING" ) as cm:
            BertTokenizer.from_pretrained(__lowerCamelCase )
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from." ) )
        SCREAMING_SNAKE_CASE = "bert-base-cased"
        with self.assertLogs("transformers" , level="WARNING" ) as cm:
            BertJapaneseTokenizer.from_pretrained(__lowerCamelCase )
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from." ) )
| 16
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__a : Union[str, Any] = logging.get_logger(__name__)

# NOTE(review): every constant below is bound to the same name `__a`, so each
# assignment overwrites the previous one. These look like obfuscated renderings
# of distinct module constants (_CONFIG_FOR_DOC, _CHECKPOINT_FOR_DOC,
# _EXPECTED_OUTPUT_SHAPE, _IMAGE_CLASS_CHECKPOINT, _IMAGE_CLASS_EXPECTED_OUTPUT,
# MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST) -- verify against upstream.

# General docstring
__a : List[str] = "MobileNetV1Config"

# Base docstring
__a : int = "google/mobilenet_v1_1.0_224"
__a : List[Any] = [1, 1024, 7, 7]

# Image classification docstring
__a : Optional[int] = "google/mobilenet_v1_1.0_224"
__a : List[Any] = "tabby, tabby cat"

__a : Union[str, Any] = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _SCREAMING_SNAKE_CASE ( __lowercase : List[str] , __lowercase : str , __lowercase : Union[str, Any]=None ) -> Any:
    """Build a mapping from TensorFlow MobilenetV1 variable names to PyTorch
    parameters of the given model.

    NOTE(review): this block is heavily obfuscated -- the parameter list
    declares the same name twice (a SyntaxError as written) and every
    assignment target has been collapsed to `__A`, so the returned
    `tf_to_pt_map` is never actually populated as written. The *reads*
    (`model.mobilenet_va`, `backbone.conv_stem`, `backbone.layer`,
    `pointer.convolution/normalization`) match upstream transformers'
    `_build_tf_to_pytorch_map`; compare with upstream before relying on it.
    """
    __A = {}
    if isinstance(__lowercase , __lowercase ):
        __A = model.mobilenet_va
    else:
        __A = model
    # Stem convolution + its batch norm statistics.
    __A = """MobilenetV1/Conv2d_0/"""
    __A = backbone.conv_stem.convolution.weight
    __A = backbone.conv_stem.normalization.bias
    __A = backbone.conv_stem.normalization.weight
    __A = backbone.conv_stem.normalization.running_mean
    __A = backbone.conv_stem.normalization.running_var
    # 13 depthwise/pointwise pairs; PyTorch stores them as 26 flat layers.
    for i in range(1_3 ):
        __A = i + 1
        __A = i * 2
        __A = backbone.layer[pt_index]
        __A = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        __A = pointer.convolution.weight
        __A = pointer.normalization.bias
        __A = pointer.normalization.weight
        __A = pointer.normalization.running_mean
        __A = pointer.normalization.running_var
        __A = backbone.layer[pt_index + 1]
        __A = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        __A = pointer.convolution.weight
        __A = pointer.normalization.bias
        __A = pointer.normalization.weight
        __A = pointer.normalization.running_mean
        __A = pointer.normalization.running_var
    # Classification head only exists on the classifier model variant.
    if isinstance(__lowercase , __lowercase ):
        __A = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
        __A = model.classifier.weight
        __A = model.classifier.bias
    return tf_to_pt_map
def _SCREAMING_SNAKE_CASE ( __lowercase : Optional[Any] , __lowercase : int , __lowercase : int ) -> str:
    """Load TensorFlow checkpoint weights into the PyTorch MobileNetV1 model.

    NOTE(review): obfuscated -- the parameter list repeats the same name (a
    SyntaxError as written) and assignment targets are collapsed to `__A`;
    the reads match upstream `load_tf_weights_in_mobilenet_v1` (model,
    config, tf_checkpoint_path). Verify against upstream before use.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
            """https://www.tensorflow.org/install/ for installation instructions.""" )
        raise
    # Load weights from TF model
    __A = tf.train.list_variables(__lowercase )
    __A = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}" )
        __A = tf.train.load_variable(__lowercase , __lowercase )
        __A = array
    # Build TF to PyTorch weights loading map
    __A = _build_tf_to_pytorch_map(__lowercase , __lowercase , __lowercase )
    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}" )
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping" )
            continue
        __A = tf_weights[name]
        # TF stores conv kernels HWIO; PyTorch wants OIHW (depthwise: IOHW).
        if "depthwise_weights" in name:
            logger.info("""Transposing depthwise""" )
            __A = np.transpose(__lowercase , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info("""Transposing""" )
            if len(pointer.shape ) == 2:  # copying into linear layer
                __A = array.squeeze().transpose()
            else:
                __A = np.transpose(__lowercase , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
        logger.info(f"Initialize PyTorch weight {name} {array.shape}" )
        __A = torch.from_numpy(__lowercase )
        # Drop consumed weights and their optimizer/EMA shadows so the final
        # log lists only genuinely uncopied variables.
        tf_weights.pop(__lowercase , __lowercase )
        tf_weights.pop(name + """/RMSProp""" , __lowercase )
        tf_weights.pop(name + """/RMSProp_1""" , __lowercase )
        tf_weights.pop(name + """/ExponentialMovingAverage""" , __lowercase )
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
    return model
def _SCREAMING_SNAKE_CASE ( __lowercase : torch.Tensor , __lowercase : nn.Convad ) -> torch.Tensor:
"""simple docstring"""
__A , __A = features.shape[-2:]
__A , __A = conv_layer.stride
__A , __A = conv_layer.kernel_size
if in_height % stride_height == 0:
__A = max(kernel_height - stride_height , 0 )
else:
__A = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
__A = max(kernel_width - stride_width , 0 )
else:
__A = max(kernel_width - (in_width % stride_width) , 0 )
__A = pad_along_width // 2
__A = pad_along_width - pad_left
__A = pad_along_height // 2
__A = pad_along_height - pad_top
__A = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(__lowercase , __lowercase , """constant""" , 0.0 )
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : MobileNetVaConfig , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] = 1 , UpperCamelCase_ : Optional[int] = 1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[bool] = True , UpperCamelCase_ : Optional[bool or str] = True , ):
"""simple docstring"""
super().__init__()
__A = config
if in_channels % groups != 0:
raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups." )
if out_channels % groups != 0:
raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups." )
__A = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__A = nn.Convad(
in_channels=UpperCamelCase_ , out_channels=UpperCamelCase_ , kernel_size=UpperCamelCase_ , stride=UpperCamelCase_ , padding=UpperCamelCase_ , groups=UpperCamelCase_ , bias=UpperCamelCase_ , padding_mode="""zeros""" , )
if use_normalization:
__A = nn.BatchNormad(
num_features=UpperCamelCase_ , eps=config.layer_norm_eps , momentum=0.9997 , affine=UpperCamelCase_ , track_running_stats=UpperCamelCase_ , )
else:
__A = None
if use_activation:
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__A = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCamelCase_ ):
__A = ACTaFN[config.hidden_act]
else:
__A = config.hidden_act
else:
__A = None
def lowerCAmelCase_ ( self : Union[str, Any] , UpperCamelCase_ : torch.Tensor ):
"""simple docstring"""
if self.config.tf_padding:
__A = apply_tf_padding(UpperCamelCase_ , self.convolution )
__A = self.convolution(UpperCamelCase_ )
if self.normalization is not None:
__A = self.normalization(UpperCamelCase_ )
if self.activation is not None:
__A = self.activation(UpperCamelCase_ )
return features
class __lowercase ( lowercase_ ):
    """Abstract base handling weight init and TF-checkpoint loading hooks.

    Fixes vs. original: all five class attributes were bound to the same name
    `SCREAMING_SNAKE_CASE` (each overwriting the last); restored to the
    standard HF `PreTrainedModel` hook names. `nn.Convad`/`nn.BatchNormad`
    corrected to the real torch classes, and the init hook renamed to
    `_init_weights` so the base class actually invokes it.
    NOTE(review): `load_tf_weights_in_mobilenet_va` is not defined under that
    name in this file (the loader function's name is obfuscated) -- verify.
    """

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module):
        """Initialize conv/linear weights normally; batch norm to identity."""
        if isinstance(module, (nn.Linear, nn.Conv2d) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
# Model-level and forward() docstrings injected via the add_start_docstrings
# decorators below.
# NOTE(review): both are bound to the same name `__a` (obfuscation) -- upstream
# calls these MOBILENET_V1_START_DOCSTRING / MOBILENET_V1_INPUTS_DOCSTRING.
__a : Tuple = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__a : int = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , lowercase_ , )
class __lowercase ( lowercase_ ):
    """MobileNetV1 backbone: stem conv + 13 depthwise/pointwise pairs.

    Fixes vs. original: `__init__` and the forward method each declared the
    same parameter name multiple times (SyntaxErrors) and every assignment
    target was collapsed to `__A`; names restored from the body reads
    (`self.conv_stem`, `self.layer`, `self.pooler`, `depth`, `strides`, ...)
    and from the keyword call sites. The forward method is named `forward`
    so nn.Module dispatch works.
    NOTE(review): decorator arguments (`lowercase_`, `UpperCamelCase_`,
    `_CHECKPOINT_FOR_DOC`, ...) reference names not defined in this file --
    left as in the original; verify against upstream.
    """

    def __init__(self, config, add_pooling_layer = True ):
        super().__init__(config )
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )

        self.conv_stem = MobileNetVaConvLayer(
            config , in_channels=config.num_channels , out_channels=out_channels , kernel_size=3 , stride=2 , )

        # Stride pattern of the 13 depthwise stages.
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13 ):
            in_channels = out_channels
            # Channel count doubles at every striding stage (and the first).
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )

            # Depthwise 3x3 followed by pointwise 1x1.
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=in_channels , kernel_size=3 , stride=strides[i] , groups=in_channels , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=out_channels , kernel_size=1 , ) )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1) ) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune ):
        # Head pruning does not apply to a convolutional model.
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(UpperCamelCase_ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward(self, pixel_values = None , output_hidden_states = None , return_dict = None , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("""You have to specify pixel_values""" )

        hidden_states = self.conv_stem(pixel_values )

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer ):
            hidden_states = layer_module(hidden_states )
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state ) , start_dim=1 )
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=all_hidden_states , )
@add_start_docstrings(
    "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowercase_ , )
class __lowercase ( lowercase_ ):
    """MobileNetV1 backbone + dropout + linear classification head.

    Fixes vs. original: the forward method declared the same parameter name
    four times (a SyntaxError) and assignment targets were collapsed to
    `__A`; restored from the body reads (`self.num_labels`,
    `self.mobilenet_va`, `self.dropout`, `self.classifier`, `logits`,
    `loss`, ...). Forward is named `forward` for nn.Module dispatch.
    NOTE(review): decorator arguments reference names not defined in this
    file -- left as in the original; verify against upstream.
    """

    def __init__(self, config ):
        super().__init__(config )

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config )

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob , inplace=True )
        self.classifier = nn.Linear(last_hidden_size , config.num_labels ) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UpperCamelCase_ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward(self, pixel_values = None , output_hidden_states = None , labels = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output ) )

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype / label count.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = """single_label_classification"""
                else:
                    self.config.problem_type = """multi_label_classification"""

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , )
| 637
| 0
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowerCamelCase :
    # Empty placeholder class; exists only to exercise @require_onnxruntime.
    pass
| 705
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Any = logging.get_logger(__name__)

# Map from Funnel Transformer checkpoint names to their hosted config files.
__lowerCAmelCase : Dict = {
    'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
    'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
    'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
    'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
    'funnel-transformer/intermediate': (
        'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
    ),
    'funnel-transformer/intermediate-base': (
        'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
    ),
    'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
    'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
    'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
    'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class lowerCamelCase ( PretrainedConfig ):
    """Configuration class for Funnel Transformer models.

    Fixes vs. original: every `__init__` parameter was named
    `__lowerCamelCase` (a SyntaxError); parameter names restored from the
    body reads (vocab_size, block_sizes, ...). Class attributes were both
    bound to `__lowerCamelCase` (the second overwrote the first); restored
    to the standard HF `model_type`/`attribute_map` hooks. The properties
    are named `num_hidden_layers`/`num_blocks` so their `@...setter`
    decorators actually resolve, and the base class is the imported
    `PretrainedConfig` (the original base name was undefined).
    """

    model_type = 'funnel'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
    }

    def __init__(
        self,
        vocab_size=3_05_22,
        block_sizes=[4, 4, 4],  # kept as in upstream; never mutated
        block_repeats=None,
        num_decoder_layers=2,
        d_model=7_68,
        n_head=12,
        d_head=64,
        d_inner=30_72,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        # Default: run each block once.
        self.block_repeats = [1] * len(block_sizes ) if block_repeats is None else block_repeats
        assert len(block_sizes ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs )

    @property
    def num_hidden_layers(self ):
        # Total encoder layers = sum of the per-block layer counts.
        return sum(self.block_sizes )

    @num_hidden_layers.setter
    def num_hidden_layers(self, value ):
        raise NotImplementedError(
            """This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" )

    @property
    def num_blocks(self ):
        return len(self.block_sizes )

    @num_blocks.setter
    def num_blocks(self, value ):
        raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
| 164
| 0
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCAmelCase_ ( ):
    """Download the LAVIS demo image (the merlion) and return it as an RGB PIL image.

    Fixes vs. original: the body referenced the undefined name `A_` for both
    the URL and the `stream=` flag (a NameError); restored the obvious
    intent (stream the URL, per the upstream conversion script).
    """
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    return image
def lowerCAmelCase_ ( config ):
    """Build the list of (lavis_name, hf_name) weight-rename pairs for BLIP-2.

    Fixes vs. original: the single parameter was named `A_` while the body
    read `config`, and the result list was bound to an unrelated obfuscated
    name while the appends targeted `rename_keys`; names unified.

    Args:
        config: a BLIP-2 config exposing `vision_config.num_hidden_layers`.

    Returns:
        list[tuple[str, str]] of rename pairs.
    """
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys
def lowerCAmelCase_ ( dct , old , new ):
    """Move `dct[old]` to `dct[new]` in place.

    Fixes vs. original: all three parameters were named `A_` (a SyntaxError)
    while the body read `dct`; the standard (dct, old, new) rename-key
    signature of HF conversion scripts is restored.
    """
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Merge each vision block's separate q/v attention biases into one qkv bias.

    BLIP-2's vision encoder stores no bias for the key projection, so a zero
    tensor the size of ``v_bias`` is spliced between ``q_bias`` and ``v_bias``
    to match the fused HF ``qkv`` layout. Mutates ``state_dict`` in place.

    The obfuscated original declared two parameters with the same name (a
    SyntaxError) and dropped the final dict write; both are restored here.

    Args:
        state_dict: checkpoint state dict using the original LAVIS key layout.
        config: config object providing ``vision_config.num_hidden_layers``.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict: the key bias is implicitly zero
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias


# Preserve the module-level binding the obfuscated original created.
lowerCAmelCase_ = read_in_q_v_bias
def get_blipa_config(model_name, eos_token_id=None):
    """Assemble the composite BLIP-2 config for a given checkpoint name.

    The obfuscated original declared two parameters with the same name (a
    SyntaxError) and discarded every local; this restores the intended logic.

    Args:
        model_name: one of the ``blip2-*`` checkpoint names handled below.
        eos_token_id: forwarded to the OPT text config (OPT variants only).

    Returns:
        (config, image_size): COCO-finetuned checkpoints use 364px inputs,
        all other checkpoints 224px.

    Raises:
        ValueError: if ``model_name`` matches none of the known backbones
        (the original silently fell through to an UnboundLocalError).
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    else:
        raise ValueError(f"Model name {model_name} not supported")
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size


# Preserve the module-level binding the obfuscated original created.
lowerCAmelCase_ = get_blipa_config
# NOTE(review): this conversion entry point was machine-obfuscated — all three
# parameters are named ``A_`` (a SyntaxError) and every assignment targets
# ``UpperCamelCase__`` while later reads use the intended names (tokenizer,
# hf_model, state_dict, ...). Tokens are left untouched; comments document the
# intended flow. Intended signature:
# (model_name, pytorch_dump_folder_path=None, push_to_hub=False).
@torch.no_grad()
def lowerCAmelCase_ ( A_ ,A_=None ,A_=False):
    """Convert a LAVIS BLIP-2 checkpoint to the HF Transformers format and
    verify the converted model against the original implementation."""
    # Pick the tokenizer matching the language backbone (OPT vs. Flan-T5).
    UpperCamelCase__: List[str] = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    UpperCamelCase__: Dict = tokenizer("\n" ,add_special_tokens=A_).input_ids[0]
    UpperCamelCase__ , UpperCamelCase__: Optional[Any] = get_blipa_config(A_ ,eos_token_id=A_)
    # Fresh HF model in eval mode, to be filled with converted weights.
    UpperCamelCase__: int = BlipaForConditionalGeneration(A_).eval()
    # HF checkpoint name -> (LAVIS model name, LAVIS model type).
    UpperCamelCase__: Optional[Any] = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    UpperCamelCase__ , UpperCamelCase__: Optional[int] = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    UpperCamelCase__: Optional[int] = "cuda" if torch.cuda.is_available() else "cpu"
    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__: str = load_model_and_preprocess(
        name=A_ ,model_type=A_ ,is_eval=A_ ,device=A_)
    original_model.eval()
    print("Done!")
    # update state dict keys
    UpperCamelCase__: List[Any] = original_model.state_dict()
    UpperCamelCase__: Tuple = create_rename_keys(A_)
    for src, dest in rename_keys:
        rename_key(A_ ,A_ ,A_)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        UpperCamelCase__: Tuple = state_dict.pop(A_)
        if key.startswith("Qformer.bert"):
            UpperCamelCase__: Dict = key.replace("Qformer.bert" ,"qformer")
        if "attention.self" in key:
            UpperCamelCase__: Optional[int] = key.replace("self" ,"attention")
        if "opt_proj" in key:
            UpperCamelCase__: str = key.replace("opt_proj" ,"language_projection")
        if "t5_proj" in key:
            UpperCamelCase__: Optional[Any] = key.replace("t5_proj" ,"language_projection")
        if key.startswith("opt"):
            UpperCamelCase__: Optional[int] = key.replace("opt" ,"language")
        if key.startswith("t5"):
            UpperCamelCase__: str = key.replace("t5" ,"language")
        UpperCamelCase__: List[str] = val
    # read in qv biases
    read_in_q_v_bias(A_ ,A_)
    # Non-strict load; the only tolerated mismatch is the buffer asserted below.
    UpperCamelCase__ , UpperCamelCase__: Optional[int] = hf_model.load_state_dict(A_ ,strict=A_)
    assert len(A_) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    UpperCamelCase__: List[str] = load_demo_image()
    UpperCamelCase__: int = vis_processors["eval"](A_).unsqueeze(0).to(A_)
    UpperCamelCase__: Dict = tokenizer(["\n"] ,return_tensors="pt").input_ids.to(A_)
    # create processor
    UpperCamelCase__: str = BlipImageProcessor(
        size={"height": image_size, "width": image_size} ,image_mean=A_ ,image_std=A_)
    UpperCamelCase__: str = BlipaProcessor(image_processor=A_ ,tokenizer=A_)
    UpperCamelCase__: Optional[int] = processor(images=A_ ,return_tensors="pt").pixel_values.to(A_)
    # make sure processor creates exact same pixel values
    assert torch.allclose(A_ ,A_)
    original_model.to(A_)
    hf_model.to(A_)
    # Compare logits of original vs. converted model on the demo image.
    with torch.no_grad():
        if "opt" in model_name:
            UpperCamelCase__: List[str] = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            UpperCamelCase__: Optional[int] = hf_model(A_ ,A_).logits
        else:
            UpperCamelCase__: Dict = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}).logits
            # -100 is the ignore index for the LM loss.
            UpperCamelCase__: Union[str, Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id ,-1_00)
            UpperCamelCase__: Union[str, Any] = hf_model(A_ ,A_ ,labels=A_).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:" ,original_logits[0, :3, :3])
    print("First values of HF logits:" ,logits[0, :3, :3])
    # assert values
    if model_name == "blip2-flan-t5-xl":
        # Hard-coded reference slice recorded from the original model.
        UpperCamelCase__: Optional[int] = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] ,device=A_)
        assert torch.allclose(logits[0, :3, :3] ,A_ ,atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        UpperCamelCase__: List[Any] = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] ,device=A_)
    else:
        # cast to same type
        UpperCamelCase__: List[str] = logits.dtype
        assert torch.allclose(original_logits.to(A_) ,A_ ,atol=1e-2)
    print("Looks ok!")
    print("Generating a caption...")
    UpperCamelCase__: List[Any] = ""
    UpperCamelCase__: str = tokenizer(A_ ,return_tensors="pt").input_ids.to(A_)
    UpperCamelCase__: List[str] = original_model.generate({"image": original_pixel_values})
    # Mirror the sampling settings used by the original LAVIS generate call.
    UpperCamelCase__: Union[str, Any] = hf_model.generate(
        A_ ,A_ ,do_sample=A_ ,num_beams=5 ,max_length=30 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.0 ,length_penalty=1.0 ,temperature=1 ,)
    print("Original generation:" ,A_)
    UpperCamelCase__: str = input_ids.shape[1]
    # Strip the prompt tokens from the generated sequence before decoding.
    UpperCamelCase__: Any = processor.batch_decode(outputs[:, prompt_length:] ,skip_special_tokens=A_)
    UpperCamelCase__: List[Any] = [text.strip() for text in output_text]
    print("HF generation:" ,A_)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(A_)
        hf_model.save_pretrained(A_)
    if push_to_hub:
        processor.push_to_hub(F"nielsr/{model_name}")
        hf_model.push_to_hub(F"nielsr/{model_name}")
if __name__ == "__main__":
    # CLI entry point for the BLIP-2 conversion script.
    # NOTE(review): obfuscation assigned the parser, the choices list and the
    # parsed args to ``A__`` while the calls below read ``parser``/``choices``/
    # ``args``; ``convert_blipa_checkpoint`` is the conversion function that
    # was renamed to ``lowerCAmelCase_`` above. Tokens left untouched.
    A__: int = argparse.ArgumentParser()
    # Supported checkpoint names.
    A__: Tuple = [
        '''blip2-opt-2.7b''',
        '''blip2-opt-6.7b''',
        '''blip2-opt-2.7b-coco''',
        '''blip2-opt-6.7b-coco''',
        '''blip2-flan-t5-xl''',
        '''blip2-flan-t5-xl-coco''',
        '''blip2-flan-t5-xxl''',
    ]
    parser.add_argument(
        '''--model_name''',
        default='''blip2-opt-2.7b''',
        choices=choices,
        type=str,
        help='''Path to hf config.json of model to convert''',
    )
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model and processor to the hub after converting''',
    )
    A__: str = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 380
|
import logging
from transformers import PretrainedConfig
# Module-level logger and checkpoint->config-URL map for the BertAbs config.
# NOTE(review): both constants were obfuscated to the same name ``A__``, so the
# second assignment shadows the logger. Tokens left untouched.
A__: Dict = logging.getLogger(__name__)
A__: List[Any] = {
    '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _a ( PretrainedConfig):
    """Configuration for the BertAbs extractive/abstractive summarizer.

    The obfuscated original inherited from the undefined name
    ``UpperCamelCase__`` (``PretrainedConfig`` is imported above), reused one
    parameter name for every ``__init__`` argument (a SyntaxError) and dropped
    all attribute assignments; the intended hyperparameters are restored.

    Args:
        vocab_size: vocabulary size.
        max_pos: maximum number of positions.
        enc_layers / enc_hidden_size / enc_heads / enc_ff_size / enc_dropout:
            encoder depth, width, attention heads, feed-forward size, dropout.
        dec_layers / dec_hidden_size / dec_heads / dec_ff_size / dec_dropout:
            decoder counterparts.
    """

    model_type = """bertabs"""
    # Legacy alias kept from the obfuscated original.
    UpperCamelCase__ = """bertabs"""

    def __init__(
        self,
        vocab_size=3_0522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 380
| 1
|
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger and checkpoint->config-URL map for the OWL-ViT configs.
# NOTE(review): both constants were obfuscated to the same name ``a_``, so the
# URL map shadows the logger; later code reads ``logger``, a name lost in
# obfuscation. Tokens left untouched.
a_ : str = logging.get_logger(__name__)
a_ : Any = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class a ( PretrainedConfig ):
    """OWL-ViT text-encoder configuration (CLIP-style text tower).

    The obfuscated original inherited from the undefined name
    ``_SCREAMING_SNAKE_CASE`` (``PretrainedConfig`` is imported above), reused
    one parameter name for all ``__init__`` arguments (a SyntaxError) and
    discarded every attribute assignment; the intended behavior is restored.
    """

    model_type = """owlvit_text_model"""
    # Legacy alias kept from the obfuscated original.
    _lowerCAmelCase = """owlvit_text_model"""

    def __init__(
        self,
        vocab_size=4_94_08,
        hidden_size=5_12,
        intermediate_size=20_48,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.0_2,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=4_94_06,
        eos_token_id=4_94_07,
        **kwargs,
    ) -> None:
        # Special-token ids go to the base class so downstream utilities see them.
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        """Load this text config, unwrapping a composite OWL-ViT config dict."""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get('model_type' ) == "owlvit":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class a ( PretrainedConfig ):
    """OWL-ViT vision-encoder configuration (ViT image tower).

    The obfuscated original inherited from the undefined name
    ``_SCREAMING_SNAKE_CASE`` (``PretrainedConfig`` is imported above), reused
    one parameter name for all ``__init__`` arguments (a SyntaxError) and
    discarded every attribute assignment; the intended behavior is restored.
    """

    model_type = """owlvit_vision_model"""
    # Legacy alias kept from the obfuscated original.
    _lowerCAmelCase = """owlvit_vision_model"""

    def __init__(
        self,
        hidden_size=7_68,
        intermediate_size=30_72,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=7_68,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.0_2,
        initializer_factor=1.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        """Load this vision config, unwrapping a composite OWL-ViT config dict."""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get('model_type' ) == "owlvit":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class a ( PretrainedConfig ):
    """Composite OWL-ViT configuration holding text and vision sub-configs.

    The obfuscated original inherited from the undefined name
    ``_SCREAMING_SNAKE_CASE`` (``PretrainedConfig`` is imported above), bound
    both class attributes and all method names to the same identifiers (so only
    the last of each survived), and discarded every assignment; the intended
    behavior is restored.
    """

    model_type = """owlvit"""
    is_composition = True
    # Legacy alias kept from the obfuscated original (only the second of its
    # two ``_lowerCAmelCase`` assignments, ``True``, survived shadowing).
    _lowerCAmelCase = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=5_12,
        logit_scale_init_value=2.6_5_9_2,
        return_dict=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the OwlViTVisionConfig with default values.' )
        # NOTE(review): in this obfuscated file the two sub-config classes are
        # both named ``a``; the references below keep the originals' names
        # (OwlViTTextConfig / OwlViTVisionConfig) exactly as in the source.
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        """Load the composite config from a checkpoint."""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )

    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        """Build a composite config from separate text/vision config dicts."""
        config_dict = {}
        config_dict['text_config'] = text_config
        config_dict['vision_config'] = vision_config
        return cls.from_dict(config_dict , **kwargs )

    def to_dict( self ):
        """Serialize, expanding nested sub-configs and recording model_type."""
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class a ( OnnxConfig ):
    """ONNX export configuration for OWL-ViT.

    The obfuscated original inherited from the undefined name
    ``_SCREAMING_SNAKE_CASE`` (``OnnxConfig`` is imported above) and bound all
    properties/methods to the same mangled name, so only the last survived;
    the intended override names required by the ONNX exporter are restored.
    """

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        # Model inputs with their dynamic axes.
        return OrderedDict(
            [
                ('input_ids', {0: 'batch', 1: 'sequence'}),
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('attention_mask', {0: 'batch', 1: 'sequence'}),
            ] )

    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        # CLIP-style similarity logits plus the raw embeddings.
        return OrderedDict(
            [
                ('logits_per_image', {0: 'batch'}),
                ('logits_per_text', {0: 'batch'}),
                ('text_embeds', {0: 'batch'}),
                ('image_embeds', {0: 'batch'}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        # Absolute tolerance used when validating the exported model outputs.
        return 1e-4

    def generate_dummy_inputs( self , processor , batch_size = -1 , seq_length = -1 , framework = None , ) -> Mapping[str, Any]:
        """Produce dummy text and image inputs via the processor's two halves
        and merge them into a single feed dict."""
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=batch_size , seq_length=seq_length , framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor , batch_size=batch_size , framework=framework )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset( self ) -> int:
        # Minimum ONNX opset version supporting the required operators.
        return 14
| 707
|
'''simple docstring'''
class a :
def __init__( self , __magic_name__ ) -> Optional[int]:
_a = n
_a = [None] * self.n
_a = 0 # index of the first element
_a = 0
_a = 0
def __len__( self ) -> int:
return self.size
def __UpperCAmelCase ( self ) -> bool:
return self.size == 0
def __UpperCAmelCase ( self ) -> Optional[Any]:
return False if self.is_empty() else self.array[self.front]
def __UpperCAmelCase ( self , __magic_name__ ) -> Optional[Any]:
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
_a = data
_a = (self.rear + 1) % self.n
self.size += 1
return self
def __UpperCAmelCase ( self ) -> Any:
if self.size == 0:
raise Exception('UNDERFLOW' )
_a = self.array[self.front]
_a = None
_a = (self.front + 1) % self.n
self.size -= 1
return temp
| 532
| 0
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase ):
    '''Byte-level (ByT5-style) tokenizer: every UTF-8 byte is its own token on
    top of a small fixed special-token vocabulary plus optional "<extra_id_*>"
    sentinel tokens.

    NOTE(review): this block was machine-obfuscated — the base class name
    ``UpperCAmelCase`` is undefined here (presumably PreTrainedTokenizer,
    imported above — confirm), parameters are all named ``UpperCAmelCase_``
    (a SyntaxError where repeated) and local assignments target
    ``_lowerCAmelCase`` while later reads use the intended names. Tokens are
    left untouched; only documentation was added.
    '''

    SCREAMING_SNAKE_CASE_: int = ["input_ids", "attention_mask"]  # model input names

    def __init__( self : Optional[int] , UpperCAmelCase_ : Dict="</s>" , UpperCAmelCase_ : Any="<unk>" , UpperCAmelCase_ : Dict="<pad>" , UpperCAmelCase_ : Union[str, Any]=125 , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : Optional[Any] , ) -> None:
        """Intended parameters: (eos_token, unk_token, pad_token, extra_ids,
        additional_special_tokens). Builds the byte vocabulary plus the
        special-token encoder/decoder maps."""
        if extra_ids > 0 and additional_special_tokens is None:
            _lowerCAmelCase = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            _lowerCAmelCase = len(set(filter(lambda UpperCAmelCase_ : bool('extra_id' in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    ' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
                    ' extra_ids tokens' )
        # Wrap plain-string special tokens in AddedToken instances.
        _lowerCAmelCase = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else pad_token
        _lowerCAmelCase = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else eos_token
        _lowerCAmelCase = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else unk_token
        super().__init__(
            eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , )
        _lowerCAmelCase = extra_ids
        _lowerCAmelCase = 2**8 # utf is 8 bits
        # define special tokens dict
        _lowerCAmelCase = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        _lowerCAmelCase = len(self.special_tokens_encoder )
        _lowerCAmelCase = len(UpperCAmelCase_ )
        for i, token in enumerate(UpperCAmelCase_ ):
            # Map each additional special token to an id at the top of the vocab.
            _lowerCAmelCase = self.vocab_size + i - n
        _lowerCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def __lowerCamelCase ( self : List[str] ) -> List[str]:
        """Vocabulary size: 256 byte values + special tokens + extra_ids."""
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def __lowerCamelCase ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) -> List[int]:
        """Special-tokens mask: a 1 marks the eos appended after each sequence."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(UpperCAmelCase_ )) + [1]
        return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1]

    def __lowerCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[int] ) -> List[int]:
        """Append eos unless the sequence already ends with it (then just warn)."""
        if len(UpperCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
                ' eos tokens being added.' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def __lowerCamelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
        """Token type ids: ByT5 does not use them, so every position is 0."""
        _lowerCAmelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]

    def __lowerCamelCase ( self : Optional[Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
        """Build model input: seq_a</s> or seq_a</s>seq_b</s>."""
        _lowerCAmelCase = self._add_eos_if_not_present(UpperCAmelCase_ )
        if token_ids_a is None:
            return token_ids_a
        else:
            _lowerCAmelCase = self._add_eos_if_not_present(UpperCAmelCase_ )
            return token_ids_a + token_ids_a

    def __lowerCamelCase ( self : Any , UpperCAmelCase_ : str ) -> List[str]:
        """Tokenize: one single-character token per UTF-8 byte of the text."""
        _lowerCAmelCase = [chr(UpperCAmelCase_ ) for i in text.encode('utf-8' )]
        return tokens

    def __lowerCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Tuple ) -> Tuple:
        """token -> id: special tokens first, then added tokens, then raw bytes
        offset by the number of special tokens; multi-char strings map to unk."""
        if token in self.special_tokens_encoder:
            _lowerCAmelCase = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            _lowerCAmelCase = self.added_tokens_encoder[token]
        elif len(UpperCAmelCase_ ) != 1:
            _lowerCAmelCase = self.unk_token_id
        else:
            _lowerCAmelCase = ord(UpperCAmelCase_ ) + self._num_special_tokens
        return token_id

    def __lowerCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Dict ) -> Optional[int]:
        """id -> token: inverse of the mapping above."""
        if index in self.special_tokens_decoder:
            _lowerCAmelCase = self.special_tokens_decoder[index]
        else:
            _lowerCAmelCase = chr(index - self._num_special_tokens )
        return token

    def __lowerCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
        """Re-assemble bytes, rendering special/added tokens as their UTF-8
        text, and decode the byte string ignoring invalid sequences."""
        _lowerCAmelCase = b''
        for token in tokens:
            if token in self.special_tokens_decoder:
                _lowerCAmelCase = self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.added_tokens_decoder:
                _lowerCAmelCase = self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.special_tokens_encoder:
                _lowerCAmelCase = token.encode('utf-8' )
            elif token in self.added_tokens_encoder:
                _lowerCAmelCase = token.encode('utf-8' )
            else:
                _lowerCAmelCase = bytes([ord(UpperCAmelCase_ )] )
            bstring += tok_string
        _lowerCAmelCase = bstring.decode('utf-8' , errors='ignore' )
        return string

    def __lowerCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
        """A byte-level tokenizer has no vocabulary file to save."""
        return ()
| 580
|
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase ):
    '''Speech-to-image community pipeline: Whisper transcribes the input audio
    into a text prompt, which then drives a standard Stable Diffusion
    text-to-image loop.

    NOTE(review): this block was machine-obfuscated — the base class name
    ``UpperCAmelCase`` is undefined (presumably DiffusionPipeline, imported
    above — confirm), every method parameter is named ``UpperCAmelCase_``
    (a SyntaxError where repeated) and local assignments all target
    ``_lowerCAmelCase`` while later reads use the intended names. Tokens are
    left untouched; only documentation was added.
    '''

    def __init__( self : List[str] , UpperCAmelCase_ : WhisperForConditionalGeneration , UpperCAmelCase_ : WhisperProcessor , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : CLIPTextModel , UpperCAmelCase_ : CLIPTokenizer , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase_ : StableDiffusionSafetyChecker , UpperCAmelCase_ : CLIPImageProcessor , ) -> List[Any]:
        """Register all sub-modules; warns when the safety checker is disabled."""
        super().__init__()
        if safety_checker is None:
            logger.warning(
                F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
                ' results in services or applications open to the public. Both the diffusers team and Hugging Face'
                ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
                ' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
                ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
        self.register_modules(
            speech_model=UpperCAmelCase_ , speech_processor=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , )

    def __lowerCamelCase ( self : Tuple , UpperCAmelCase_ : Optional[Union[str, int]] = "auto" ) -> List[str]:
        """Enable sliced attention; "auto" halves the attention head dim."""
        if slice_size == "auto":
            _lowerCAmelCase = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(UpperCAmelCase_ )

    def __lowerCamelCase ( self : List[str] ) -> Union[str, Any]:
        """Disable attention slicing (re-enables full attention)."""
        self.enable_attention_slicing(UpperCAmelCase_ )

    @torch.no_grad()
    def __call__( self : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int=16_000 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Any , ) -> Any:
        """Transcribe the audio with Whisper, then run the standard Stable
        Diffusion denoising loop on the transcription."""
        # Transcribe the raw audio into a text prompt.
        _lowerCAmelCase = self.speech_processor.feature_extractor(
            UpperCAmelCase_ , return_tensors='pt' , sampling_rate=UpperCAmelCase_ ).input_features.to(self.device )
        _lowerCAmelCase = self.speech_model.generate(UpperCAmelCase_ , max_length=480_000 )
        _lowerCAmelCase = self.speech_processor.tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , normalize=UpperCAmelCase_ )[
            0
        ]
        if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
            _lowerCAmelCase = 1
        elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
            _lowerCAmelCase = len(UpperCAmelCase_ )
        else:
            raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(UpperCAmelCase_ )}""" )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or callback_steps <= 0)
        ):
            raise ValueError(
                F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                F""" {type(UpperCAmelCase_ )}.""" )
        # get prompt text embeddings
        _lowerCAmelCase = self.tokenizer(
            UpperCAmelCase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        _lowerCAmelCase = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            _lowerCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            _lowerCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length]
        _lowerCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = text_embeddings.shape
        _lowerCAmelCase = text_embeddings.repeat(1 , UpperCAmelCase_ , 1 )
        _lowerCAmelCase = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCAmelCase_ , -1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        _lowerCAmelCase = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            _lowerCAmelCase = 42
            if negative_prompt is None:
                _lowerCAmelCase = [''] * batch_size
            elif type(UpperCAmelCase_ ) is not type(UpperCAmelCase_ ):
                raise TypeError(
                    F"""`negative_prompt` should be the same type to `prompt`, but got {type(UpperCAmelCase_ )} !="""
                    F""" {type(UpperCAmelCase_ )}.""" )
            elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
                _lowerCAmelCase = [negative_prompt]
            elif batch_size != len(UpperCAmelCase_ ):
                raise ValueError(
                    F"""`negative_prompt`: {negative_prompt} has batch size {len(UpperCAmelCase_ )}, but `prompt`:"""
                    F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    ' the batch size of `prompt`.' )
            else:
                _lowerCAmelCase = negative_prompt
            _lowerCAmelCase = text_input_ids.shape[-1]
            _lowerCAmelCase = self.tokenizer(
                UpperCAmelCase_ , padding='max_length' , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors='pt' , )
            _lowerCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            _lowerCAmelCase = uncond_embeddings.shape[1]
            _lowerCAmelCase = uncond_embeddings.repeat(1 , UpperCAmelCase_ , 1 )
            _lowerCAmelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCAmelCase_ , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            _lowerCAmelCase = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        _lowerCAmelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        _lowerCAmelCase = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                _lowerCAmelCase = torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device='cpu' , dtype=UpperCAmelCase_ ).to(
                    self.device )
            else:
                _lowerCAmelCase = torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=self.device , dtype=UpperCAmelCase_ )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            _lowerCAmelCase = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(UpperCAmelCase_ )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        _lowerCAmelCase = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        _lowerCAmelCase = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        _lowerCAmelCase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        _lowerCAmelCase = {}
        if accepts_eta:
            _lowerCAmelCase = eta
        # Denoising loop.
        for i, t in enumerate(self.progress_bar(UpperCAmelCase_ ) ):
            # expand the latents if we are doing classifier free guidance
            _lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            _lowerCAmelCase = self.scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
            # predict the noise residual
            _lowerCAmelCase = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ ).sample
            # perform guidance
            if do_classifier_free_guidance:
                _lowerCAmelCase , _lowerCAmelCase = noise_pred.chunk(2 )
                _lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            _lowerCAmelCase = self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
        # Decode latents back to image space (1/0.18215 is the SD VAE scale).
        _lowerCAmelCase = 1 / 0.18215 * latents
        _lowerCAmelCase = self.vae.decode(UpperCAmelCase_ ).sample
        _lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            _lowerCAmelCase = self.numpy_to_pil(UpperCAmelCase_ )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=UpperCAmelCase_ , nsfw_content_detected=UpperCAmelCase_ )
| 580
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model) tests for ShapEPipeline.

    NOTE(review): the original obfuscated block assigned every class attribute and
    local to the same throwaway name while the bodies referenced the canonical
    mixin names (`self.pipeline_class`, `self.batch_params`, `prior`, ...), so it
    raised NameError. Names restored from those call sites.
    """

    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    # GPU offload is not exercised for this pipeline in the fast suite.
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        # Tiny hidden size keeps the dummy CLIP text encoder fast.
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        # NOTE(review): the boolean scheduler kwargs were obfuscated; `True` matches
        # the upstream ShapE test configuration — confirm against diffusers.
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # `mps` does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        # Replicate every batched parameter so the prompt batch matches batch_size.
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    """Slow end-to-end test against the released `openai/shap-e` checkpoint."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 701
|
from __future__ import annotations
class Node:
    """A binary tree node holding `data` and optional `left`/`right` children."""

    def __init__(self, data):
        # The original obfuscated code dropped every value into one local,
        # leaving the instance without attributes; restore the real fields.
        self.data = data
        self.left = None
        self.right = None
def display(tree: Node | None) -> None:  # In Order traversal of the tree
    """Print the tree's values using in-order traversal (left, node, right).

    The obfuscated version named the parameter `_A` while the body used `tree`,
    and named the function so its recursive calls could not resolve; both fixed.
    """
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)
def depth_of_tree(tree: Node | None) -> int:
    """Return the number of nodes on the longest root-to-leaf path (0 if empty)."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0
def is_full_binary_tree(tree: Node) -> bool:
    """Return True if every node in the tree has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        # A node with exactly one child makes the tree non-full.
        return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    """Build a small binary tree and exercise the helpers above.

    The obfuscated version created nine unlinked nodes in one local and then
    printed an undefined name; restore the linked tree and the references.
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
| 571
| 0
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Lazy import structure: maps submodule name -> public names it exports.
# The obfuscated version overwrote one global (`__A`) for each list and never
# installed the lazy module; restore keyed assignment and sys.modules swap.
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 68
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
# Module logger and the canonical checkpoint -> config-URL map for CodeGen.
# The obfuscated version bound both to the same name (clobbering the logger)
# and used `Tuple`/`Dict` annotations that are not imported here.
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
    'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
    'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
    'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
    'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
    'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
    'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
    'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
    'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
    'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
    'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
    'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    """Configuration for CodeGen models.

    NOTE(review): the obfuscated block declared every ``__init__`` parameter with
    the same name (a SyntaxError) and an undefined base class ``a``; parameter
    names restored from the attribute assignments in the body.
    """

    model_type = 'codegen'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for CodeGen (decoder with optional past key values).

    NOTE(review): the obfuscated block used duplicate parameter names (SyntaxError)
    and one shared method name; identifiers restored from their own call sites
    (``self.num_layers``, ``self.num_attention_heads``, ``common_inputs``, ...).
    """

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: Optional[List[PatchingSpec]] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, """pad_token_id""", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="""inputs""")
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
            else:
                import torch

                batch, seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            # Extend the mask to cover the dummy past positions as well.
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 50
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    """Helper that holds DPT image-processor hyperparameters for the test class below.

    Renamed from the obfuscated ``UpperCamelCase`` to match the sibling reference
    ``DPTImageProcessingTester(self)``; the obfuscated ``__init__`` also used
    duplicate parameter names and never set the instance attributes it later reads.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a DPTImageProcessor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for DPTImageProcessor over PIL, NumPy, and PyTorch inputs.

    NOTE(review): in the obfuscated block every method shared one name (so only
    the last survived and none were discovered by unittest), the mixin base was
    an undefined name, and ``self.image_processor_tester`` was never assigned.
    """

    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
| 477
|
"""simple docstring"""
import numpy as np
def lowercase__(vector: np.array) -> np.array:
    """Apply the logistic sigmoid 1 / (1 + e^-x) elementwise.

    The obfuscated signature named the parameter `__SCREAMING_SNAKE_CASE` while
    the body used `vector` (NameError); the parameter name is restored.
    """
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 477
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# Renamed from the obfuscated `snake_case`: the feature-extractor class below
# calls `logger.warning(...)`, which was otherwise undefined.
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor that pads/chunks raw audio for EnCodec-style models.

    NOTE(review): the obfuscated block declared duplicate ``a__`` parameters
    (SyntaxError), never set ``self.chunk_length_s``/``self.overlap``, and
    collapsed the locals of ``__call__``; names restored from their use sites.
    """

    model_input_names = ["""input_values""", """padding_mask"""]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        """Chunk length in samples, or None when no chunking is configured."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        """Stride between chunks in samples (>= 1), or None when not configured."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        """Convert raw audio into padded/chunked ``input_values`` (+ ``padding_mask``)."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
                    F''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one." )
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))) )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' )

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                # Truncate to the largest length that is a whole number of chunks.
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                # Pad up to the smallest whole-number-of-chunks length that fits all.
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask" )

        input_values = []
        for example in padded_inputs.pop("input_values" ):
            if self.feature_size == 1:
                # Add a trailing channel axis for mono audio.
                example = example[..., None]
            input_values.append(example.T )
        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
| 378
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self : str , _lowercase : List[str] , _lowercase : Dict=13 , _lowercase : List[str]=7 , _lowercase : Union[str, Any]=True , _lowercase : int=True , _lowercase : int=True , _lowercase : Optional[Any]=True , _lowercase : List[Any]=99 , _lowercase : Optional[Any]=[1, 1, 2] , _lowercase : Optional[Any]=1 , _lowercase : List[str]=32 , _lowercase : Dict=4 , _lowercase : List[str]=8 , _lowercase : List[str]=37 , _lowercase : int="gelu_new" , _lowercase : Optional[int]=0.1 , _lowercase : List[str]=0.1 , _lowercase : Union[str, Any]=0.0 , _lowercase : str=512 , _lowercase : Optional[Any]=3 , _lowercase : str=0.02 , _lowercase : Union[str, Any]=3 , _lowercase : Optional[Any]=4 , _lowercase : Tuple=None , _lowercase : List[Any]=False , ) -> List[str]:
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = block_sizes
A_ = num_decoder_layers
A_ = d_model
A_ = n_head
A_ = d_head
A_ = d_inner
A_ = hidden_act
A_ = hidden_dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = 2
A_ = num_labels
A_ = num_choices
A_ = scope
A_ = initializer_std
# Used in the tests to check the size of the first attention layer
A_ = n_head
# Used in the tests to check the size of the first hidden state
A_ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
A_ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
A_ = self.num_hidden_layers + 2
def __snake_case ( self : str) -> List[str]:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length])
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
A_ = ids_tensor([self.batch_size] , self.num_choices)
A_ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __snake_case ( self : Tuple , _lowercase : Tuple , _lowercase : str , _lowercase : int , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : str , _lowercase : str , ) -> Dict:
A_ = TFFunnelModel(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
A_ = [input_ids, input_mask]
A_ = model(_lowercase)
A_ = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
A_ = False
A_ = TFFunnelModel(config=_lowercase)
A_ = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
A_ = False
A_ = TFFunnelModel(config=_lowercase)
A_ = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def __snake_case ( self : Union[str, Any] , _lowercase : int , _lowercase : Tuple , _lowercase : int , _lowercase : int , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[int] , ) -> Dict:
A_ = TFFunnelBaseModel(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
A_ = [input_ids, input_mask]
A_ = model(_lowercase)
A_ = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
A_ = False
A_ = TFFunnelBaseModel(config=_lowercase)
A_ = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
A_ = False
A_ = TFFunnelBaseModel(config=_lowercase)
A_ = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def __snake_case ( self : int , _lowercase : List[str] , _lowercase : int , _lowercase : List[str] , _lowercase : str , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : Optional[Any] , ) -> List[Any]:
A_ = TFFunnelForPreTraining(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def __snake_case ( self : List[str] , _lowercase : str , _lowercase : List[Any] , _lowercase : List[str] , _lowercase : List[Any] , _lowercase : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : Tuple , ) -> Union[str, Any]:
A_ = TFFunnelForMaskedLM(config=_lowercase)
A_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A_ = model(_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check ``TFFunnelForSequenceClassification`` returns (batch, num_labels) logits.

    NOTE(review): original had duplicate ``_lowercase`` parameters (SyntaxError) and
    discarded the ``num_labels`` assignment; restored the usual ``config.num_labels``
    wiring used by the sibling token-classification test.
    """
    config.num_labels = self.num_labels
    model = TFFunnelForSequenceClassification(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_multiple_choice(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check ``TFFunnelForMultipleChoice`` returns (batch, num_choices) logits.

    NOTE(review): original had duplicate ``_lowercase`` parameters (SyntaxError) and
    bound all three tiled tensors to ``A_``; restored distinct names so the inputs
    dict below actually resolves.
    """
    config.num_choices = self.num_choices
    model = TFFunnelForMultipleChoice(config=config)
    # Tile each (batch, seq) tensor to (batch, num_choices, seq).
    multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
    multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
    multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
    inputs = {
        "input_ids": multiple_choice_inputs_ids,
        "attention_mask": multiple_choice_input_mask,
        "token_type_ids": multiple_choice_token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_token_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check ``TFFunnelForTokenClassification`` returns (batch, seq_len, num_labels) logits.

    NOTE(review): original had duplicate ``_lowercase`` parameters (SyntaxError) and
    discarded the ``num_labels`` assignment; restored.
    """
    config.num_labels = self.num_labels
    model = TFFunnelForTokenClassification(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check ``TFFunnelForQuestionAnswering`` returns start/end logits of shape (batch, seq_len).

    NOTE(review): original had duplicate ``_lowercase`` parameters (SyntaxError); restored.
    """
    model = TFFunnelForQuestionAnswering(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
    """Return ``(config, inputs_dict)`` for the shared TFModelTesterMixin tests.

    NOTE(review): the original unpacked the 7-tuple into the same throwaway name
    seven times, so the names used in the dict below were undefined; restored
    the conventional unpack.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard-model suite for the TF Funnel architecture.

    NOTE(review): the original declared the class with the same base name twice
    (``class C(A, A)`` raises TypeError), gave all four class attributes one
    shared name, and named every method identically so unittest discovered
    nothing. Restored the mixin attribute/method names the test framework
    requires. Base/mixin and ``FunnelConfig`` names assumed from the file's
    (out-of-view) imports — TODO confirm.
    """

    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    """Suite for the "base" (decoder-less) TF Funnel variants.

    NOTE(review): as with the suite above, the original clobbered all class
    attributes and method names; restored the names unittest and the shared
    mixin require. The tester is constructed with ``base=True`` to exercise
    the base-model code path.
    """

    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 366
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : str = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
A__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 718
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    """Builds small ``XGLMConfig``/input fixtures for the shared TF model tests.

    NOTE(review): the original ``__init__`` bound every constructor argument to
    the same throwaway local instead of ``self.*`` (the tester carried no
    state), and all four methods shared one name so only the last survived.
    Attribute names are reconstructed from the references in ``get_config``
    and ``prepare_config_and_inputs`` — TODO confirm against upstream.
    """

    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        """Config of the real checkpoint, for slow/integration tests."""
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        # Token ids clipped to [0, 3] so tiny-vocab decoding stays deterministic.
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard-model suite for TF XGLM.

    NOTE(review): the original declared the same base name twice (TypeError at
    class creation), clobbered every class attribute into one name, and named
    all methods identically. Mixin/attribute names restored; the three boolean
    flag names are reconstructed — TODO confirm against upstream.
    """

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration tests generating text with the real facebook/xglm-564M checkpoint.

    NOTE(review): the original discarded every intermediate into one local name
    and passed an undefined base-class alias as keyword values; restored.
    All expected-output strings/ids are reproduced byte-for-byte.
    """

    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.intaa)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
        # fmt: on
        # Greedy decoding: no sampling, single beam.
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 244
| 0
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """Names of the three decoding heads used by MGP-STR.

    NOTE(review): the original assigned all three members to a single shared
    name, and the class was not named ``DecodeType`` even though the module
    references ``DecodeType.CHARACTER`` etc. below — restored the names the
    rest of the module actually uses (``ExplicitEnum`` is imported above).
    """

    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


# All supported decode formats, in priority order.
lowercase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    """Bundles a ViT image processor with the MGP-STR character tokenizer, plus
    auxiliary GPT-2 (BPE) and BERT (wordpiece) tokenizers for the model's three
    decoding heads.

    NOTE(review): the original clobbered the three class attributes into one
    name and never bound ``self.char_tokenizer`` / ``self.bpe_tokenizer`` /
    ``self.wp_tokenizer`` even though the decode methods read them — restored.
    """

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Accepts the deprecated ``feature_extractor`` kwarg as a fallback for
        ``image_processor``; both an image processor and a tokenizer are required.
        """
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        # Auxiliary tokenizers for the BPE and wordpiece heads.
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Preprocess images and/or text; when both are given, the text ids are
        attached to the image inputs under ``labels``."""
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        """Decode the (char, bpe, wp) logit triple and keep, per sample, the head
        with the highest confidence score.

        Returns a dict with ``generated_text``, ``scores`` and the per-head strings.
        """
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        """Greedy-decode one head's logits up to its EOS marker and compute a
        cumulative-probability confidence score per sample."""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        # Greedy token choice; the first position is a BOS-like slot and is dropped.
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # Confidence = product of the per-step max probabilities up to EOS.
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        """Decode character-head ids; spaces are artifacts of the tokenizer and removed."""
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        """Decode BPE-head ids with the GPT-2 tokenizer."""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """Decode wordpiece-head ids; spaces are artifacts of the tokenizer and removed."""
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 74
|
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__snake_case : Optional[int] = logging.getLogger(__name__)
def save_model(model, dirpath):
    """Save *model* into *dirpath*, first removing any stale checkpoint files.

    NOTE(review): the original signature ``def f(A_, A_)`` reused one parameter
    name (a SyntaxError) and the def did not carry the ``save_model`` name the
    call site below uses; restored.
    """
    # If the directory already exists, drop old config/weights so a partial
    # previous run cannot be mixed with the new save.
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Shannon entropy of a distribution tensor along its last dimension.

    If ``unlogit`` is True, ``p`` is squared first. Entries where ``p == 0``
    contribute 0 (avoids the ``0 * log(0)`` NaN).

    NOTE(review): the original had duplicate ``A_`` parameters (SyntaxError)
    and referenced ``unlogit``/``plogp`` that were never bound; the zero-mask
    line had degenerated to ``A_ = 0``. Restored; the def now carries the
    ``entropy`` name its call site uses.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2D tensor row by row (one line per layer, tab-separated heads).

    Integer tensors are printed with ``%d``; everything else with 5 decimals.

    NOTE(review): the original body referenced an unbound ``tensor`` and the
    def did not carry the ``print_ad_tensor`` name used at the call sites;
    restored.
    """
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run the model over *eval_dataloader* and accumulate per-head attention
    entropy and gradient-based head-importance scores.

    Returns ``(attn_entropy, head_importance, total_loss)``, each importance
    matrix of shape (num_layers, num_heads).

    NOTE(review): the original had duplicate parameter names (SyntaxError) and
    bound every accumulator to the same throwaway local; restored from the
    structure of the surviving expressions. The def now carries the
    ``compute_heads_importance`` name its call sites use.
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # Rank 0 = most important head; indexing reconstructed — TODO confirm sort order
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least-important heads until the LM score drops
    below ``masking_threshold`` of the original, then persist the final mask.

    NOTE(review): original had duplicate parameters (SyntaxError) and collapsed
    every binding into one local; restored. The def now carries the
    ``mask_heads`` name. The masked-head Inf/zero bookkeeping lines are
    reconstructed — TODO confirm against upstream.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Physically prune the heads zeroed in *head_mask*, compare score/speed/size
    against masking, and save the pruned model.

    NOTE(review): original had duplicate parameters (SyntaxError); restored.
    The def now carries the ``prune_heads`` name.
    """
    # Timing and score with the soft mask only.
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    # Per-layer list of head indices whose mask entry is 0.
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # squeeze() collapses single-head layers to a bare int; re-wrap.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    # Timing and score with the heads actually removed.
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def lowerCamelCase__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=A_ , type=A_ , required=A_ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=A_ , type=A_ , required=A_ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=A_ , type=A_ , required=A_ , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=A_ , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=A_ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=A_ , type=A_ , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=A_ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=A_ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=A_ , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=A_ , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=A_ , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=A_ , help="Batch size." )
parser.add_argument("--seed" , type=A_ , default=42 )
parser.add_argument("--local_rank" , type=A_ , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=A_ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=A_ , default="" , help="Can be used for distant debugging." )
UpperCAmelCase_ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
UpperCAmelCase_ = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
UpperCAmelCase_ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
UpperCAmelCase_ = torch.device("cuda" , args.local_rank )
UpperCAmelCase_ = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
UpperCAmelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
UpperCAmelCase_ = nn.parallel.DistributedDataParallel(
A_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=A_ )
elif args.n_gpu > 1:
UpperCAmelCase_ = nn.DataParallel(A_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=A_ )
torch.save(A_ , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , A_ )
# Prepare dataset
UpperCAmelCase_ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
UpperCAmelCase_ = (torch.from_numpy(A_ ),)
UpperCAmelCase_ = TensorDataset(*A_ )
UpperCAmelCase_ = RandomSampler(A_ )
UpperCAmelCase_ = DataLoader(A_ , sampler=A_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(A_ , A_ , A_ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCAmelCase_ = mask_heads(A_ , A_ , A_ )
prune_heads(A_ , A_ , A_ , A_ )
if __name__ == "__main__":
main()
| 660
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Builds image-processor kwargs and random image batches for ChineseCLIP tests.

    NOTE(review): reconstructed from an obfuscated version whose ``__init__``
    declared every parameter as ``lowercase__`` (duplicate argument names — a
    SyntaxError). Parameter names below follow the attribute assignments in
    the body; the class/method names follow the call sites in the test classes
    (``ChineseCLIPImageProcessingTester``, ``prepare_image_processor_dict``,
    ``prepare_inputs``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        # Fall back to the checkpoint defaults when no explicit sizes are given.
        size = size if size is not None else {'height': 224, 'width': 224}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            'do_resize': self.do_resize,
            'size': self.size,
            'do_center_crop': self.do_center_crop,
            'crop_size': self.crop_size,
            'do_normalize': self.do_normalize,
            'image_mean': self.image_mean,
            'image_std': self.image_std,
            'do_convert_rgb': self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random images as PIL images, numpy arrays, or torch tensors.

        At most one of ``numpify``/``torchify`` may be set; with neither set,
        channel-last PIL images are returned.
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    # uint8 (fixed from the corrupted ``np.uinta``) so PIL can consume it.
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                # Draw an independent (width, height) per image so resolutions differ.
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tests for ChineseCLIPImageProcessor (3-channel input, center-crop enabled).

    NOTE(review): obfuscated block. All methods share the name
    ``_UpperCamelCase`` (only the last binding of each name survives at class
    creation), the mixin base is hidden behind ``_SCREAMING_SNAKE_CASE``
    (presumably ImageProcessingSavingTestMixin from the imports), and several
    value references (``lowercase__``, ``image_processor``, ``image_inputs``,
    ``encoded_images``, the obfuscated assignment targets) do not resolve as
    written — verify against the upstream transformers test suite before
    relying on behavior.
    """
    # Image-processor class under test; None when vision deps are missing.
    snake_case__ : List[Any] = ChineseCLIPImageProcessor if is_vision_available() else None

    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # setUp: build the shared tester helper.
        # NOTE(review): ``lowercase__`` is unresolved here; upstream passes
        # do_center_crop=True and stores the result on self.image_processor_tester.
        SCREAMING_SNAKE_CASE : int = ChineseCLIPImageProcessingTester(self , do_center_crop=lowercase__ )

    @property
    def _UpperCamelCase ( self ) -> int:
        # image_processor_dict property: kwargs for instantiating the processor.
        return self.image_processor_tester.prepare_image_processor_dict()

    def _UpperCamelCase ( self ) -> Any:
        # Checks the processor exposes every expected configuration attribute.
        SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowercase__ , 'do_resize' ) )
        self.assertTrue(hasattr(lowercase__ , 'size' ) )
        self.assertTrue(hasattr(lowercase__ , 'do_center_crop' ) )
        self.assertTrue(hasattr(lowercase__ , 'center_crop' ) )
        self.assertTrue(hasattr(lowercase__ , 'do_normalize' ) )
        self.assertTrue(hasattr(lowercase__ , 'image_mean' ) )
        self.assertTrue(hasattr(lowercase__ , 'image_std' ) )
        self.assertTrue(hasattr(lowercase__ , 'do_convert_rgb' ) )

    def _UpperCamelCase ( self ) -> Any:
        # from_dict round-trip: defaults, then size/crop_size overrides.
        SCREAMING_SNAKE_CASE : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 224, 'width': 224} )
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        # Integer overrides are normalized to shortest_edge / square crop dicts.
        SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )

    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # Intentionally empty (upstream skips the batch-feature test here).
        pass

    def _UpperCamelCase ( self ) -> str:
        # PIL path: unbatched and batched calls yield center-cropped tensors.
        # Initialize image_processing
        SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ )
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(lowercase__ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def _UpperCamelCase ( self ) -> str:
        # numpy path: same shape checks with ndarray inputs.
        # Initialize image_processing
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ , numpify=lowercase__ )
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE : str = image_processing(lowercase__ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def _UpperCamelCase ( self ) -> List[Any]:
        # torch path: same shape checks with torch.Tensor inputs.
        # Initialize image_processing
        SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ , torchify=lowercase__ )
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE : Dict = image_processing(lowercase__ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
@require_torch
@require_vision
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tests for ChineseCLIPImageProcessor with 4-channel (RGBA-like) input.

    NOTE(review): obfuscated block with the same issues as the class above:
    duplicate ``_UpperCamelCase`` method names, hidden mixin base, and
    unresolved value references. Additionally, setUp assigns ``3`` to a local
    (upstream: ``self.expected_encoded_image_num_channels = 3``), yet the PIL
    test below reads ``self.expected_encoded_image_num_channels`` — verify
    against the upstream transformers test suite.
    """
    # Image-processor class under test; None when vision deps are missing.
    snake_case__ : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None

    def _UpperCamelCase ( self ) -> str:
        # setUp: 4 input channels; conversion to RGB is expected to yield 3.
        SCREAMING_SNAKE_CASE : Any = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=lowercase__ )
        SCREAMING_SNAKE_CASE : List[Any] = 3

    @property
    def _UpperCamelCase ( self ) -> Any:
        # image_processor_dict property: kwargs for instantiating the processor.
        return self.image_processor_tester.prepare_image_processor_dict()

    def _UpperCamelCase ( self ) -> List[Any]:
        # Checks the processor exposes every expected configuration attribute.
        SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowercase__ , 'do_resize' ) )
        self.assertTrue(hasattr(lowercase__ , 'size' ) )
        self.assertTrue(hasattr(lowercase__ , 'do_center_crop' ) )
        self.assertTrue(hasattr(lowercase__ , 'center_crop' ) )
        self.assertTrue(hasattr(lowercase__ , 'do_normalize' ) )
        self.assertTrue(hasattr(lowercase__ , 'image_mean' ) )
        self.assertTrue(hasattr(lowercase__ , 'image_std' ) )
        self.assertTrue(hasattr(lowercase__ , 'do_convert_rgb' ) )

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Intentionally empty (upstream skips the batch-feature test here).
        pass

    def _UpperCamelCase ( self ) -> List[Any]:
        # PIL path: 4-channel inputs are converted to RGB, so the encoded
        # tensor has expected_encoded_image_num_channels channels.
        # Initialize image_processing
        SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=lowercase__ )
        for image in image_inputs:
            self.assertIsInstance(lowercase__ , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE : Any = image_processing(lowercase__ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
| 716
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase :str = logging.get_logger(__name__)
_lowerCAmelCase :Optional[int] = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """Configuration for a SegFormer model (hierarchical transformer encoder +
    lightweight all-MLP decode head).

    NOTE(review): reconstructed from an obfuscated version whose ``__init__``
    declared every parameter as ``lowercase__`` (duplicate argument names — a
    SyntaxError). Parameter names follow the attribute assignments in the body;
    the base class is ``PretrainedConfig``, which this module already imports.
    """

    model_type = 'segformer'

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # reshape_last_stage=False is deprecated; warn but keep honoring it.
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.',
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # Default True when not explicitly passed (matches the deprecation above).
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for SegFormer.

    NOTE(review): the obfuscated source hid the base class behind
    ``_SCREAMING_SNAKE_CASE``; ``OnnxConfig`` is imported by this module and
    matches the overridden members below.
    """

    # Minimum torch version required for a correct ONNX export.
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel-values input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 179
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Builds image-processor kwargs and sizes for LayoutLMv3 image-processor tests.

    NOTE(review): reconstructed from an obfuscated version whose ``__init__``
    declared every parameter as ``_UpperCamelCase`` (duplicate argument names —
    a SyntaxError). Parameter names follow the attribute assignments in the
    body; the class/method names follow the call sites in the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default resize target when no explicit size is given.
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( UpperCAmelCase , unittest.TestCase ):
    """Tests for LayoutLMvaImageProcessor (resize + OCR behavior).

    NOTE(review): obfuscated block. All methods share the name
    ``UpperCamelCase`` (only the last binding of each name survives at class
    creation), and several value references (``_UpperCamelCase`` used as a
    value, ``image_processor``, ``image_inputs``, ``encoded_images``) do not
    resolve as written — verify against the upstream transformers test suite.
    """
    # Image-processor class under test; None when pytesseract is missing.
    __UpperCAmelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def UpperCamelCase ( self )-> Dict:
        # setUp: build the shared tester helper.
        # NOTE(review): upstream stores this on self.image_processor_tester.
        _A = LayoutLMvaImageProcessingTester(self )

    @property
    def UpperCamelCase ( self )-> Dict:
        # image_processor_dict property: kwargs for instantiating the processor.
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase ( self )-> List[str]:
        # Checks the processor exposes every expected configuration attribute.
        _A = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCamelCase , 'do_resize' ) )
        self.assertTrue(hasattr(_UpperCamelCase , 'size' ) )
        self.assertTrue(hasattr(_UpperCamelCase , 'apply_ocr' ) )

    def UpperCamelCase ( self )-> List[Any]:
        # from_dict round-trip: defaults, then an integer size override.
        _A = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
        _A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )

    def UpperCamelCase ( self )-> int:
        # Intentionally empty (upstream skips the batch-feature test here).
        pass

    def UpperCamelCase ( self )-> Any:
        # PIL path: unbatched and batched calls yield resized tensors plus
        # OCR outputs (words/boxes) on the unbatched encoding.
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , Image.Image )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='pt' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        self.assertIsInstance(encoding.words , _UpperCamelCase )
        self.assertIsInstance(encoding.boxes , _UpperCamelCase )
        # Test batched
        _A = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

    def UpperCamelCase ( self )-> Optional[int]:
        # numpy path: same shape checks with ndarray inputs.
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , np.ndarray )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        _A = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

    def UpperCamelCase ( self )-> Any:
        # torch path: same shape checks with torch.Tensor inputs.
        # Initialize image_processing
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , torch.Tensor )
        # Test not batched input
        _A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        _A = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def UpperCamelCase ( self )-> Any:
        # Integration test: runs real Tesseract OCR on a DocVQA fixture image
        # and compares the recognized words/boxes against golden values, then
        # re-runs with OCR disabled and checks only pixel_values are produced.
        # NOTE(review): obfuscated — ``ds``, ``image_processing`` and the
        # ``_UpperCamelCase`` value references do not resolve as written;
        # requires network (datasets hub) and pytesseract at runtime.
        # with apply_OCR = True
        _A = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        _A = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
        _A = Image.open(ds[0]['file'] ).convert('RGB' )
        _A = image_processing(_UpperCamelCase , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        _A = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
        _A = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , _UpperCamelCase )
        self.assertListEqual(encoding.boxes , _UpperCamelCase )
        # with apply_OCR = False
        _A = LayoutLMvaImageProcessor(apply_ocr=_UpperCamelCase )
        _A = image_processing(_UpperCamelCase , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 292
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCAmelCase = """Create a default config file for Accelerate with only a few flags set."""
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Write a minimal default Accelerate cluster config file.

    NOTE(review): reconstructed from an obfuscated version whose parameters
    were all named ``__UpperCamelCase`` (duplicate argument names — a
    SyntaxError); names follow the body's usage. The function name matches the
    ``write_basic_config(...)`` call later in this module.

    Args:
        mixed_precision: one of 'no', 'fp16', 'bf16' or 'fp8'.
        save_location: path of the JSON config file to create.
        use_xpu: whether to configure for Intel XPU devices when available.

    Returns:
        The ``Path`` of the written config file, or ``False`` if a config
        already exists at ``save_location`` (nothing is overwritten).

    Raises:
        ValueError: if ``mixed_precision`` is not a recognised value.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        # Never clobber an existing configuration.
        print(
            f'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.'
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}'
        )
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    # Pick the accelerator backend in priority order: CUDA, then XPU (opt-in),
    # then NPU, otherwise CPU-only.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['num_processes'] = num_gpus
        config['use_cpu'] = False
        if num_gpus > 1:
            config['distributed_type'] = 'MULTI_GPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['num_processes'] = num_xpus
        config['use_cpu'] = False
        if num_xpus > 1:
            config['distributed_type'] = 'MULTI_XPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['num_processes'] = num_npus
        config['use_cpu'] = False
        if num_npus > 1:
            config['distributed_type'] = 'MULTI_NPU'
        else:
            config['distributed_type'] = 'NO'
    else:
        config['use_cpu'] = True
        config['num_processes'] = 1
        config['distributed_type'] = 'NO'
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def lowerCamelCase_(parser, parents):
    """Register the `accelerate config default` subcommand.

    NOTE(review): reconstructed from an obfuscated version in which both
    parameters shared one name and several argument values were collapsed to
    the parameter name. The help text and formatter below are taken from this
    module's ``lowerCAmelCase`` description constant and the imported
    ``SubcommandHelpFormatter``; the ``func`` default resolves (at call time)
    to the last module-level ``lowerCamelCase_`` binding, i.e. the config
    command entry point defined after this function.

    Args:
        parser: the subparsers action to register the command on.
        parents: parent parsers providing shared arguments.

    Returns:
        The configured subparser.
    """
    parser = parser.add_parser('default', parents=parents, help=lowerCAmelCase, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        '--config_file',
        default=default_json_config_file,
        help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ),
        dest='save_location',
    )
    parser.add_argument(
        '--mixed_precision',
        choices=['no', 'fp16', 'bf16'],
        type=str,
        help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.',
        default='no',
    )
    parser.set_defaults(func=lowerCamelCase_)
    return parser
def lowerCamelCase_(args):
    """Entry point for `accelerate config default`: write the default config.

    Prints the saved location unless an existing config prevented the write
    (``write_basic_config`` returns ``False`` in that case).
    """
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f'accelerate configuration saved at {config_file}')
| 292
| 1
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """Distribution of ``loc + scale * X`` for a base distribution ``X``.

    NOTE(review): reconstructed from an obfuscated version with duplicate
    ``A_`` parameter names; the class name matches the ``AffineTransformed``
    call in the distribution-output base class below, and the base class
    ``TransformedDistribution`` is imported by this module.
    """

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        # Missing loc/scale default to the identity transform.
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affine-transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance, scaled by ``scale**2`` (loc shift does not affect it)."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation (square root of the variance)."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Projects a hidden vector to one tensor per distribution parameter.

    NOTE(review): name matches the ``ParameterProjection(...)`` call in the
    distribution-output base class below; the obfuscated original declared
    all three ``__init__`` parameters as ``A_``.

    Args:
        in_features: size of the input feature dimension.
        args_dim: mapping from parameter name to its dimensionality.
        domain_map: callable mapping the raw projections into the parameter domain.
    """

    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One linear head per distribution parameter.
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        # Unbounded projections first, then constrained via the domain map.
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable as an ``nn.Module``.

    NOTE(review): name matches the ``LambdaLayer(self.domain_map)`` call in
    the distribution-output base class below.
    """

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        # Delegate directly to the wrapped callable.
        return self.function(x, *args)
class DistributionOutput:
    """Base class that maps model outputs to a torch ``Distribution``.

    NOTE(review): reconstructed from an obfuscated version where the three
    class-level declarations were collapsed to ``_A = 42``; the method names
    are grounded by intra-block references (``self._base_distribution``,
    ``self.event_shape``, ``self.event_dim``) and by ``cls.squareplus`` calls
    in the subclasses below.
    """

    # Subclasses set these: the torch distribution class, the input feature
    # size, and the per-parameter dimensionality.
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # Scale each parameter's dimensionality by the output dimension.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        # For multivariate output, wrap in Independent so the last axis is
        # treated as an event dimension.
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        """Construct the (optionally affine-transformed) output distribution."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        # Scalar output has an empty event shape.
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value guaranteed to lie in the distribution's support (e.g. for padding)."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection module mapping features to distribution args."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args: torch.Tensor):
        """Map raw projections into the parameter domain; subclasses implement."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # Smooth positivity transform (x + sqrt(x^2 + 4)) / 2 — an
        # alternative to softplus that is exactly 1 at x = 0.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """Student's t output head with parameters ``df``, ``loc``, ``scale``."""

    args_dim = {"df": 1, "loc": 1, "scale": 1}
    distribution_class = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        # scale must be strictly positive; shifting df above 2 keeps the
        # variance finite.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """Gaussian output head with parameters ``loc`` and ``scale``."""

    args_dim = {"loc": 1, "scale": 1}
    distribution_class = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        # scale must be strictly positive.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative-binomial output head with parameters ``total_count`` and ``logits``."""

    args_dim = {"total_count": 1, "logits": 1}
    distribution_class = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        # total_count must be positive; logits stay unconstrained.
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        """Scaling is folded into the logits instead of an affine transform."""
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 584
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def UpperCAmelCase_():
    """Check ``kruskal`` against a known 9-node graph and its known MST."""
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # The MST here is unique, so the sorted edge lists must match exactly.
    assert sorted(result) == sorted(expected)
| 584
| 1
|
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Map each slow-tokenizer name to its fast counterpart class in `transformers`;
# both names are read by the conversion function below.
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Download slow tokenizer checkpoints, convert them and save only tokenizer.json.

    Args:
        tokenizer_name: name of a fast tokenizer class in TOKENIZER_CLASSES, or
            None to convert all of them.
        checkpoint_name: single checkpoint to convert, or None for all canonical
            checkpoints of each tokenizer class.
        dump_path: output directory for the generated fast tokenizer files.
        force_download: re-download checkpoints instead of using the cache.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")
        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            # If the checkpoint appears verbatim in the vocab-file map, mirror the
            # remote layout locally (sub-directory vs filename prefix).
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            # Keep only the fast tokenizer file; drop the slow-format artifacts.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 107
|
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Return the Jaccard similarity |A ∩ B| / |A ∪ B| of two collections.

    Sets use true set semantics; lists/tuples are compared element-wise and may
    count duplicates. With ``alternative_union`` the denominator is
    ``len(set_a) + len(set_b)`` instead of the size of the union.
    Returns None for unsupported type combinations.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # list(...) so tuple inputs concatenate too (bare + would raise).
            union = list(set_a) + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
| 107
| 1
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase=0.9_99 , lowerCAmelCase="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCAmelCase ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCAmelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
UpperCAmelCase = []
for i in range(lowerCamelCase__ ):
UpperCAmelCase = i / num_diffusion_timesteps
UpperCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCamelCase__ ) / alpha_bar_fn(lowerCamelCase__ ) , lowerCamelCase__ ) )
return torch.tensor(lowerCamelCase__ , dtype=torch.floataa )
class UpperCamelCase_(SchedulerMixin, ConfigMixin):
    """KDPM2 discrete scheduler (2nd-order sampler in sigma space).

    Method names are restored to match the internal call sites in this file
    (``index_for_timestep``, ``set_timesteps``, ``sigma_to_t``,
    ``state_in_first_order``) and the standard scheduler API.
    """

    # NOTE(review): attribute names follow the diffusers scheduler convention
    # (`_compatibles`, `order`) — verify against the library version in use.
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        """Map a timestep to its index in the (interleaved) schedule."""
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]):
        """Scale the input by 1 / sqrt(sigma^2 + 1) for the current step."""
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(self, num_inference_steps: int, device=None, num_train_timesteps: Optional[int] = None):
        """Precompute sigmas, interpolated sigmas and interleaved timesteps."""
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def sigma_to_t(self, sigma):
        """Invert the sigma schedule: interpolate a (fractional) timestep."""
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        # No stored sample means the next `step` call is a first-order step.
        return self.sample is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        """Run one (half-)step of the 2nd-order KDPM2 update."""
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ):
        """Forward-diffuse clean samples to the noise level of `timesteps`."""
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 710
|
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()

# Map each convertible model type to (config class, TF class(es), PT class(es),
# pretrained archive map(s)). NOTE(review): tuple arity varies by entry; the
# converter functions below unpack 4 or 5 elements — verify each entry against
# the unpack site that consumes it.
MODEL_CLASSES = {
    "bart": (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "bert": (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-base-cased-finetuned-mrpc": (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "dpr": (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "gpt2": (
        GPTaConfig,
        TFGPTaLMHeadModel,
        GPTaLMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlnet": (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm": (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm-roberta": (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "transfo-xl": (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "openai-gpt": (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta": (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "layoutlm": (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "roberta-large-mnli": (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "camembert": (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "flaubert": (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert": (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert-base-distilled-squad": (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert": (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert-visual-feature-encoder": (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "ctrl": (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "albert": (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "t5": (
        TaConfig,
        TFTaForConditionalGeneration,
        TaForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "electra": (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "wav2vec2": (
        WavaVecaConfig,
        TFWavaVecaModel,
        WavaVecaModel,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """Convert a single PyTorch checkpoint to a TF2 h5 weights file.

    Args:
        model_type: key into MODEL_CLASSES.
        pytorch_checkpoint_path: local path or shortcut name of the PT checkpoint.
        config_file: local path or shortcut name of the model config.
        tf_dump_path: output path for the TF2 weights (h5).
        compare_with_pt_model: if True, run both models and assert outputs match.
        use_cached_models: reuse the local cache instead of re-downloading.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    # NOTE(review): these two flags were both collapsed by the obfuscation to
    # plain `True` assignments; restored to the config toggles the comparison
    # below relies on — confirm against upstream.
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Convert one or all model types' checkpoints to TF2, delegating each
    conversion to ``convert_pt_checkpoint_to_tf``."""
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                # Finetuned checkpoints are looked up under their own key.
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(model_file)
                os.remove(config_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        help=(
            f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
            "convert all the models from AWS."
        ),
    )
    parser.add_argument(
        "--pytorch_checkpoint_path",
        default=None,
        type=str,
        help=(
            "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
            "If not given, will download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        help=(
            "The config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture. If not given and "
            "--pytorch_checkpoint_path is not given or is a shortcut name "
            "use the configuration associated to the shortcut name on the AWS"
        ),
    )
    parser.add_argument(
        "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
    )
    parser.add_argument(
        "--use_cached_models",
        action="store_true",
        help="Use cached models if possible instead of updating to latest checkpoint versions.",
    )
    parser.add_argument(
        "--remove_cached_files",
        action="store_true",
        help="Remove pytorch models after conversion (save memory when converting in batches).",
    )
    parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()

    # if args.pytorch_checkpoint_path is not None:
    #     convert_pt_checkpoint_to_tf(args.model_type.lower(),
    #                                 args.pytorch_checkpoint_path,
    #                                 args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
    #                                 args.tf_dump_path,
    #                                 compare_with_pt_model=args.compare_with_pt_model,
    #                                 use_cached_models=args.use_cached_models)
    # else:
    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
| 378
| 0
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class lowerCamelCase:
    """Pure-Python SHA-1 (FIPS 180-1 style).

    ``lowerCamelCase(b"abc").final_hash()`` returns the 40-char hex digest.

    BUG FIX: the obfuscation gave every method the same name
    (``lowerCAmelCase__``), so the ``self.rotate``/``self.padding``/
    ``self.split_blocks``/``self.expand_block`` calls in the body could never
    resolve; the intended method names are restored here.
    """

    def __init__(self, data):
        # Raw message bytes and the five 32-bit initial state words.
        self.data = data
        self.h = [0X6745_2301, 0XEFCD_AB89, 0X98BA_DCFE, 0X1032_5476, 0XC3D2_E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer ``n`` by ``b`` bits."""
        return ((n << b) | (n >> (32 - b))) & 0XFFFF_FFFF

    def padding(self):
        """Append the 0x80 marker, zero padding, and the 64-bit bit length."""
        padding = B'\x80' + B'\x00' * (63 - (len(self.data) + 8) % 64)
        return self.data + padding + struct.pack('>Q', 8 * len(self.data))

    def split_blocks(self):
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand a 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack('>16L', block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the compression rounds over all blocks and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                # Round-dependent boolean function f and constant k.
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0X5A82_7999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0X6ED9_EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0X8F1B_BCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0XCA62_C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0XFFFF_FFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0XFFFF_FFFF,
                self.h[1] + b & 0XFFFF_FFFF,
                self.h[2] + c & 0XFFFF_FFFF,
                self.h[3] + d & 0XFFFF_FFFF,
                self.h[4] + e & 0XFFFF_FFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def __lowerCAmelCase( ) -> Any:
    """Sanity-check the local SHA-1 implementation against hashlib."""
    # NOTE(review): `SHAaHash` is not defined in this file — the implementation
    # above was renamed to `lowerCamelCase` by the obfuscation; confirm and
    # restore the original class name.
    _A = b'Test String'
    # BUG FIX: the original hashed an undefined `_SCREAMING_SNAKE_CASE` and
    # called the nonexistent `hashlib.shaa` (should be `hashlib.sha1`).
    assert SHAaHash(_A).final_hash() == hashlib.sha1(_A).hexdigest()  # noqa: S324
def __lowerCAmelCase( ) -> int:
    """CLI entry point: hash a --string (default) or the bytes of a --file."""
    # BUG FIX: the obfuscation bound the parser / parsed args / input bytes to
    # throwaway names (`_A`) while the rest of the body kept the original
    # identifiers (`parser`, `args`), causing NameErrors.
    parser = argparse.ArgumentParser(description='Process some strings or files')
    parser.add_argument(
        '--string', dest='input_string', default='Hello World!! Welcome to Cryptography', help='Hash the string', )
    parser.add_argument('--file', dest='input_file', help='Hash contents of a file')
    args = parser.parse_args()
    hash_input = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, 'utf-8')
    # NOTE(review): `SHAaHash` is undefined here — the implementation above was
    # renamed to `lowerCamelCase` by the obfuscation.
    print(SHAaHash(hash_input).final_hash())
if __name__ == "__main__":
    main()  # NOTE(review): `main` is undefined — both functions above share the mangled name `__lowerCAmelCase`
# NOTE(review): these doctest lines run at import time; they likely belonged
# under the `__main__` guard before the files were concatenated.
import doctest
doctest.testmod()
| 27
|
from collections.abc import Callable
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
_A = a
_A = b
if function(_SCREAMING_SNAKE_CASE ) == 0: # one of the a or b is a root for the function
return a
elif function(_SCREAMING_SNAKE_CASE ) == 0:
return b
elif (
function(_SCREAMING_SNAKE_CASE ) * function(_SCREAMING_SNAKE_CASE ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
_A = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(_SCREAMING_SNAKE_CASE ) == 0:
return mid
elif function(_SCREAMING_SNAKE_CASE ) * function(_SCREAMING_SNAKE_CASE ) < 0:
_A = mid
else:
_A = mid
_A = start + (end - start) / 2.0
return mid
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
return x**3 - 2 * x - 5
if __name__ == "__main__":
    print(bisection(f, 1, 1_000))  # NOTE(review): `bisection` and `f` are undefined — both defs above carry the mangled name `__lowerCAmelCase`
import doctest
doctest.testmod()
| 27
| 1
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCAmelCase_(swin_name):
    """Build a SwinConfig from a timm checkpoint name such as
    ``swin_tiny_patch4_window7_224``.

    NOTE(review): reconstructed from mangled source — every assignment target
    had been collapsed to ``__magic_name__``; local names are recovered from
    later uses inside this function.
    """
    config = SwinConfig()
    name_split = swin_name.split("_")
    # e.g. ["swin", "tiny", "patch4", "window7", "224"]
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    # ImageNet-22k checkpoints have 21841 classes, otherwise ImageNet-1k.
    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    # BUG FIX: the mangled code called int() on the function argument instead
    # of the label key `k`.
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def lowerCAmelCase_(name):
    """Map a timm Swin state-dict key to the HF Transformers naming scheme.

    BUG FIX: the obfuscation dropped the ``name = name.replace(...)``
    rebindings (all collapsed to ``__magic_name__``), so none of the renames
    took effect; the rebindings are restored here.
    """
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    # The classification head keeps no "swin." prefix; everything else does.
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name
    return name
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
    # NOTE(review): both parameters were collapsed to the same name (a
    # SyntaxError); presumably they were the timm state dict and the HF model.
    # The assignment targets below were likewise collapsed to `__magic_name__`,
    # which loses the state-dict keys originally written for the split
    # query/key/value tensors — those keys are not recoverable from this file.
    for key in orig_state_dict.copy().keys():
        __magic_name__ : Optional[int] =orig_state_dict.pop(lowerCamelCase )
        if "mask" in key:
            # relative-position masks have no HF counterpart and are skipped
            continue
        elif "qkv" in key:
            # fused qkv tensors are sliced into query/key/value parts of size `dim`
            __magic_name__ : Union[str, Any] =key.split(""".""" )
            __magic_name__ : Optional[Any] =int(key_split[1] )
            __magic_name__ : Union[str, Any] =int(key_split[3] )
            __magic_name__ : Optional[int] =model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                __magic_name__ : List[str] =val[:dim, :]
                __magic_name__ : Optional[int] =val[
                    dim : dim * 2, :
                ]
                __magic_name__ : List[str] =val[-dim:, :]
            else:
                __magic_name__ : str =val[
                    :dim
                ]
                __magic_name__ : Optional[Any] =val[
                    dim : dim * 2
                ]
                __magic_name__ : int =val[
                    -dim:
                ]
        else:
            # non-qkv keys pass through (originally under their renamed key)
            __magic_name__ : Tuple =val
    return orig_state_dict
def lowerCAmelCase_(swin_name, pytorch_dump_folder_path):
    """Convert a timm Swin checkpoint to HF format, verify the logits match,
    and save model + image processor.

    NOTE(review): reconstructed from mangled source — assignment targets and
    some call arguments (``pretrained=``, ``stream=``) had been collapsed to a
    throwaway name; `get_swin_config`/`convert_state_dict` are defined above
    under mangled names and must be restored to these identifiers.
    """
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    # Standard COCO sample image used by the HF conversion scripts.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    # The converted model must reproduce timm's logits.
    assert torch.allclose(timm_outs, hf_outs, atol=1E-3)

    print(F"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # BUG FIX: the parser was bound to a throwaway name (`UpperCAmelCase_`)
    # while the code used `parser`/`args`; both bindings are restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 367
|
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
# WGS-84 ellipsoid constants (metres).
# BUG FIX: the obfuscation collapsed all three constant names to
# `UpperCAmelCase_`; the distance function below reads AXIS_A, AXIS_B and
# EQUATORIAL_RADIUS, so those names are restored here.
AXIS_A = 6378137.0  # semi-major axis
AXIS_B = 6356752.314245  # semi-minor axis
EQUATORIAL_RADIUS = 6378137
def lowerCAmelCase_(lat1, lon1, lat2, lon2):
    """Lambert's ellipsoidal distance (metres) between two lat/lon points.

    BUG FIX: the obfuscation collapsed all four parameters to one name (a
    SyntaxError) and distinct locals (the two parametric latitudes, P/Q, the
    X/Y terms) to shared identifiers; names are reconstructed from the
    formula comments in the body.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
    # run the module's doctests when executed directly
    import doctest
    doctest.testmod()
| 367
| 1
|
from itertools import product
def A(sides_number, dice_number) -> list[int]:
    """Frequency of each possible total when throwing `dice_number` fair dice
    with `sides_number` faces (Project Euler 205 helper).

    BUG FIX: the obfuscation collapsed both parameters to one name (a
    SyntaxError) and the locals to shared identifiers; the keyword names used
    by the caller (`sides_number=`, `dice_number=`) fix the parameter names.

    :return: list where index t holds how many throws sum to t
    """
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces = range(min_face_number, max_face_number + 1)
    # Enumerate every ordered throw and count its total.
    for dice_numbers in product(faces, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def A() -> float:
    """Project Euler 205: probability that Peter (nine 4-sided dice) beats
    Colin (six 6-sided dice), rounded to 7 decimal places.

    NOTE(review): `total_frequency_distribution` is the original name of the
    helper above (renamed to `A` by obfuscation) — restore it before running.
    """
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9  # nine dice showing 1
    max_peter_total = 4 * 9
    min_colin_total = 6  # six dice showing 1
    # For each of Peter's totals, count all of Colin's strictly smaller totals.
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
    print(f'''{solution() = }''')  # NOTE(review): `solution` is undefined — the function above was renamed to `A` by obfuscation
| 311
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning when this legacy import path is used.
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
| 311
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
# NOTE(review): this rebinding clobbers the logger above — the map was
# presumably a separately named PRETRAINED_CONFIG_ARCHIVE_MAP constant before
# obfuscation collapsed both names to `__a`.
__a = {
    """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class UpperCamelCase__( __A ):
    """Configuration class for a SEW-D model.

    NOTE(review): reconstructed from mangled source — every ``__init__``
    parameter had been renamed to one identifier (a SyntaxError) and every
    ``self.x = x`` collapsed to ``A = x``. Parameter names are recovered from
    the right-hand sides of those assignments and from the attributes read
    later in the class; defaults are kept exactly as in the mangled signature.
    """

    _A = "sew-d"  # kept as in source; presumably the `model_type` class attribute

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-7,
        feature_layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three convolutional configuration lists must describe the same
        # number of feature-extractor layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f'''but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)'''
                f'''= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def _a(self):
        """Overall downsampling factor: product of all conv strides."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 708
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()  # presumably forces deterministic ops for the tests below — confirm against diffusers.utils.testing_utils
class UpperCamelCase__( unittest.TestCase ):
    """AudioDiffusionPipeline smoke tests (CPU)."""
    # NOTE(review): obfuscation renamed every method/property to `_a` (so later
    # defs shadow earlier ones), bound results to a throwaway `A`, and left
    # `snake_case__` unresolved in method bodies; the attribute names read in
    # the big test (`self.dummy_unet`, `self.dummy_unet_condition`,
    # `self.dummy_vqvae_and_unet`) indicate the original property names.

    def _a ( self : Optional[Any] ):
        """Per-test cleanup: release Python and CUDA memory."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def _a ( self : Tuple ):
        """Small seeded UNet fixture (originally `dummy_unet`)."""
        torch.manual_seed(0 )
        A =UNetaDModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
        return model

    @property
    def _a ( self : Dict ):
        """Conditioned UNet fixture (originally `dummy_unet_condition`)."""
        torch.manual_seed(0 )
        A =UNetaDConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
        return model

    @property
    def _a ( self : Dict ):
        """VAE + UNet pair fixture (originally `dummy_vqvae_and_unet`)."""
        torch.manual_seed(0 )
        A =AutoencoderKL(
            sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
        A =UNetaDModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
        return vqvae, unet

    @slow
    def _a ( self : int ):
        """End-to-end generation checks for the three pipeline variants."""
        A ="cpu"  # ensure determinism for the device-dependent torch.Generator
        A =Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        A =DDPMScheduler()
        A =AudioDiffusionPipeline(vqvae=snake_case__ , unet=self.dummy_unet , mel=snake_case__ , scheduler=snake_case__ )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )
        A =torch.Generator(device=snake_case__ ).manual_seed(42 )
        A =pipe(generator=snake_case__ , steps=4 )
        A =output.audios[0]
        A =output.images[0]
        A =torch.Generator(device=snake_case__ ).manual_seed(42 )
        A =pipe(generator=snake_case__ , steps=4 , return_dict=snake_case__ )
        A =output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        A =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
        A =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
        A =Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        A =DDIMScheduler()
        A =self.dummy_vqvae_and_unet
        A =AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=snake_case__ , scheduler=snake_case__ )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )
        np.random.seed(0 )
        A =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        A =torch.Generator(device=snake_case__ ).manual_seed(42 )
        A =pipe(raw_audio=snake_case__ , generator=snake_case__ , start_step=5 , steps=10 )
        A =output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        A =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        A =self.dummy_unet_condition
        A =AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=snake_case__ , mel=snake_case__ , scheduler=snake_case__ )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )
        np.random.seed(0 )
        A =torch.rand((1, 1, 10) )
        A =pipe(generator=snake_case__ , encoding=snake_case__ )
        A =output.images[0]
        A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        A =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
    """GPU integration test against the pretrained audio-diffusion checkpoint."""
    # NOTE(review): method names were mangled to `_a`, results bound to a
    # throwaway `A`, and `snake_case__` (originally the device / disable flag)
    # is unresolved in the bodies below.

    def _a ( self : Optional[int] ):
        """Per-test cleanup: release Python and CUDA memory."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _a ( self : Union[str, Any] ):
        """Generate one sample and compare a pixel slice against a reference."""
        A =torch_device
        A =DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
        A =pipe.to(snake_case__ )
        pipe.set_progress_bar_config(disable=snake_case__ )
        A =torch.Generator(device=snake_case__ ).manual_seed(42 )
        A =pipe(generator=snake_case__ )
        A =output.audios[0]
        A =output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        A =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 689
| 0
|
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase: int=28_123 ) -> str:
'''simple docstring'''
lowerCAmelCase = [1] * (limit + 1)
for i in range(2, int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1, limit // i + 1 ):
sum_divs[k * i] += k + i
lowerCAmelCase = set()
lowerCAmelCase = 0
for n in range(1, limit + 1 ):
if sum_divs[n] > n:
abundants.add(_lowerCamelCase )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
    print(solution())  # NOTE(review): `solution` is undefined — the function above was renamed to `__magic_name__` by obfuscation
| 535
|
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class lowercase ( lowercase__ ):
    """Property-like descriptor that computes a value once per instance and
    caches it on the instance under ``__cached_<name>``.

    NOTE(review): the base `lowercase__` is an obfuscation artifact —
    presumably ``property``; confirm before use.
    """

    def __get__(self, obj, objtype=None):
        # BUG FIX: the obfuscation collapsed `obj`/`objtype` to one parameter
        # name (a SyntaxError) and the `attr`/`cached` locals to a shared
        # identifier; they are restored from how the body uses them.
        if obj is None:
            # Accessed on the class: return the descriptor itself.
            return self
        if self.fget is None:
            raise AttributeError('unreadable attribute')
        attr = '__cached_' + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def __magic_name__ ( _lowerCamelCase: Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F"""invalid truth value {val!r}""" )
# NOTE(review): all helpers below were renamed to `__magic_name__` by
# obfuscation, so the private/public wrapper pairs (`_is_numpy` /
# `is_numpy_array`, `_is_torch` / `is_torch_tensor`, ...) no longer resolve
# each other; the original names are visible in the call sites.
def __magic_name__ ( _lowerCamelCase: int ) -> Union[str, Any]:
    """Return True if the argument is a tensor of any supported framework
    (torch.fx proxy, torch, tensorflow, jax, or numpy)."""
    if is_torch_fx_proxy(_lowerCamelCase ):
        return True
    if is_torch_available():
        import torch
        # framework imports are deferred so unavailable backends cost nothing
        if isinstance(_lowerCamelCase, torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf
        if isinstance(_lowerCamelCase, tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer
        if isinstance(_lowerCamelCase, (jnp.ndarray, Tracer) ):
            return True
    return isinstance(_lowerCamelCase, np.ndarray )

def __magic_name__ ( _lowerCamelCase: Optional[Any] ) -> Tuple:
    """True if the argument is a numpy ndarray (originally `_is_numpy`)."""
    return isinstance(_lowerCamelCase, np.ndarray )

def __magic_name__ ( _lowerCamelCase: str ) -> List[Any]:
    """Public numpy-array check (originally `is_numpy_array`)."""
    return _is_numpy(_lowerCamelCase )

def __magic_name__ ( _lowerCamelCase: Dict ) -> Optional[int]:
    """True if the argument is a torch.Tensor (originally `_is_torch`)."""
    import torch
    return isinstance(_lowerCamelCase, torch.Tensor )

def __magic_name__ ( _lowerCamelCase: List[Any] ) -> Union[str, Any]:
    """Torch-tensor check that avoids importing torch when unavailable."""
    return False if not is_torch_available() else _is_torch(_lowerCamelCase )

def __magic_name__ ( _lowerCamelCase: Optional[Any] ) -> List[Any]:
    """True if the argument is a torch.device (originally `_is_torch_device`)."""
    import torch
    return isinstance(_lowerCamelCase, torch.device )

def __magic_name__ ( _lowerCamelCase: List[Any] ) -> int:
    """Torch-device check that avoids importing torch when unavailable."""
    return False if not is_torch_available() else _is_torch_device(_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase: str ) -> Dict:
'''simple docstring'''
import torch
if isinstance(_lowerCamelCase, _lowerCamelCase ):
if hasattr(_lowerCamelCase, _lowerCamelCase ):
lowerCAmelCase = getattr(_lowerCamelCase, _lowerCamelCase )
else:
return False
return isinstance(_lowerCamelCase, torch.dtype )
def __magic_name__ ( _lowerCamelCase: Optional[Any] ) -> Tuple:
    """Torch-dtype check that avoids importing torch when unavailable."""
    return False if not is_torch_available() else _is_torch_dtype(_lowerCamelCase )

def __magic_name__ ( _lowerCamelCase: Tuple ) -> Tuple:
    """True if the argument is a tf.Tensor (originally `_is_tensorflow`)."""
    import tensorflow as tf
    return isinstance(_lowerCamelCase, tf.Tensor )

def __magic_name__ ( _lowerCamelCase: int ) -> Tuple:
    """TensorFlow tensor check that avoids importing TF when unavailable."""
    return False if not is_tf_available() else _is_tensorflow(_lowerCamelCase )

def __magic_name__ ( _lowerCamelCase: List[Any] ) -> Union[str, Any]:
    """True if the argument is a symbolic TF tensor."""
    import tensorflow as tf
    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    # NOTE(review): the hasattr below probes the *argument*; upstream probes the
    # `tf` module — likely an obfuscation artifact, confirm before use.
    if hasattr(_lowerCamelCase, '''is_symbolic_tensor''' ):
        return tf.is_symbolic_tensor(_lowerCamelCase )
    return type(_lowerCamelCase ) == tf.Tensor

def __magic_name__ ( _lowerCamelCase: Union[str, Any] ) -> str:
    """Symbolic-tensor check that avoids importing TF when unavailable."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCamelCase )

def __magic_name__ ( _lowerCamelCase: List[Any] ) -> List[Any]:
    """True if the argument is a jax ndarray (originally `_is_jax`)."""
    import jax.numpy as jnp  # noqa: F811
    return isinstance(_lowerCamelCase, jnp.ndarray )

def __magic_name__ ( _lowerCamelCase: Optional[Any] ) -> Optional[int]:
    """Jax-array check that avoids importing jax when unavailable."""
    return False if not is_flax_available() else _is_jax(_lowerCamelCase )
def __magic_name__(_lowerCamelCase) -> Any:
    """Recursively convert tensors/arrays inside `obj` into plain Python
    lists/scalars (originally `to_py_obj`).

    NOTE(review): `to_py_obj` / `is_tf_tensor` / `is_torch_tensor` /
    `is_jax_tensor` are the helpers' original names, lost to obfuscation.
    """
    obj = _lowerCamelCase
    if isinstance(obj, (dict, UserDict)):
        # BUG FIX: recurse on the value `v`, not on the whole container.
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        # BUG FIX: recurse on each element `o`, not on the whole container.
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def __magic_name__ ( _lowerCamelCase: Optional[int] ) -> Any:
'''simple docstring'''
if isinstance(_lowerCamelCase, (dict, UserDict) ):
return {k: to_numpy(_lowerCamelCase ) for k, v in obj.items()}
elif isinstance(_lowerCamelCase, (list, tuple) ):
return np.array(_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
return obj.numpy()
elif is_torch_tensor(_lowerCamelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_lowerCamelCase ):
return np.asarray(_lowerCamelCase )
else:
return obj
class lowercase ( lowercase__ ):
def UpperCAmelCase (self : Any ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = fields(self )
# Safety and consistency checks
if not len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(F"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""" )
lowerCAmelCase = getattr(self ,class_fields[0].name )
lowerCAmelCase = all(getattr(self ,field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase = first_field.items()
lowerCAmelCase = True
else:
try:
lowerCAmelCase = iter(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = True
except TypeError:
lowerCAmelCase = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(SCREAMING_SNAKE_CASE_ ):
if (
not isinstance(SCREAMING_SNAKE_CASE_ ,(list, tuple) )
or not len(SCREAMING_SNAKE_CASE_ ) == 2
or not isinstance(element[0] ,SCREAMING_SNAKE_CASE_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowerCAmelCase = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self ,element[0] ,element[1] )
if element[1] is not None:
lowerCAmelCase = element[1]
elif first_field is not None:
lowerCAmelCase = first_field
else:
for field in class_fields:
lowerCAmelCase = getattr(self ,field.name )
if v is not None:
lowerCAmelCase = v
def __delitem__(self : Optional[Any] ,*SCREAMING_SNAKE_CASE_ : Tuple ,**SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
"""simple docstring"""
raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def UpperCAmelCase (self : List[Any] ,*SCREAMING_SNAKE_CASE_ : Any ,**SCREAMING_SNAKE_CASE_ : int ) -> List[Any]:
"""simple docstring"""
raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def UpperCAmelCase (self : Union[str, Any] ,*SCREAMING_SNAKE_CASE_ : List[Any] ,**SCREAMING_SNAKE_CASE_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def UpperCAmelCase (self : Any ,*SCREAMING_SNAKE_CASE_ : Optional[Any] ,**SCREAMING_SNAKE_CASE_ : Dict ) -> Any:
"""simple docstring"""
raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__(self : int ,SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[int]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__(self : int ,SCREAMING_SNAKE_CASE_ : Any ,SCREAMING_SNAKE_CASE_ : int ) -> List[str]:
"""simple docstring"""
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
super().__setattr__(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def __setitem__(self : List[str] ,SCREAMING_SNAKE_CASE_ : str ,SCREAMING_SNAKE_CASE_ : Tuple ) -> List[Any]:
"""simple docstring"""
super().__setitem__(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase(self) -> Tuple[Any]:
    """Return every stored value as a plain tuple, preserving key order."""
    ordered_keys = self.keys()
    return tuple(self[key] for key in ordered_keys)
class lowercase ( lowercase__ ,lowercase__ ):
    """Enum base whose ``_missing_``-style hook raises a helpful error listing
    the valid members.

    Fixes: ``_valueamember_map_`` is not an Enum attribute — the real private
    mapping is ``_value2member_map_`` — and the parameter was mangled while the
    message already read ``value``.
    """
    @classmethod
    def UpperCAmelCase(cls, value):
        # List the legal values so a typo'd member is easy to diagnose.
        raise ValueError(
            f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}""" )
class lowercase ( lowercase__ ):
# NOTE(review): all three members below are bound to the same name `lowercase`,
# so only the last ('''do_not_pad''') survives. This looks like a padding-strategy
# enum (longest / max_length / do_not_pad) whose distinct member names were lost
# during obfuscation — confirm the intended member names before use.
lowercase = '''longest'''
lowercase = '''max_length'''
lowercase = '''do_not_pad'''
class lowercase ( lowercase__ ):
# NOTE(review): all four members below are bound to the same name `lowercase`,
# so only the last ('''jax''') survives. This looks like a tensor-framework enum
# (pt / tf / np / jax) whose distinct member names were lost — confirm.
lowercase = '''pt'''
lowercase = '''tf'''
lowercase = '''np'''
lowercase = '''jax'''
class lowercase :
    """Enter a list of context managers together via a single ``ExitStack``.

    Fixes: ``__init__`` bound its values to throwaway locals instead of
    ``self.context_managers`` / ``self.stack`` (which ``__enter__`` and
    ``__exit__`` read), and ``__exit__`` had duplicate ``*X, **X`` parameter
    names (a SyntaxError).
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        # Entering the wrapper enters every wrapped manager, in order.
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        # Delegate unwinding to the stack so managers exit in reverse order.
        self.stack.__exit__(*args, **kwargs)
def __magic_name__(model_class) -> bool:
    """Return True if ``model_class``'s forward/call signature has a
    ``return_loss`` parameter defaulting to True (i.e. it can return a loss).

    Fixes: the locals ``framework`` and ``signature`` were assigned to a
    mangled throwaway name, leaving every later read an undefined NameError;
    the parameter is now named to match the body's reads.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def __magic_name__(model_class):
    """Return the label-argument names of ``model_class``'s forward/call
    signature ("label"-containing params, plus QA start/end positions).

    Fixes: ``model_name``, ``framework`` and ``signature`` were assigned to a
    mangled throwaway name, leaving every later read an undefined NameError.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def __magic_name__(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested mapping into a single-level dict, joining nested keys
    with ``delimiter`` (e.g. ``{"a": {"b": 1}}`` -> ``{"a.b": 1}``).

    Fixes: the original had three identically-named parameters (a SyntaxError)
    and the inner generator both built and recursed through mangled names.
    """
    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            # Only prefix with the parent key once there actually is one.
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                # Recurse through the public entry point so overrides apply.
                yield from __magic_name__(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
@contextmanager
def __magic_name__(working_dir, use_temp_dir: bool = False):
    """Yield ``working_dir``, or a fresh temporary directory when
    ``use_temp_dir`` is True (cleaned up on exit).

    Fixes: the original had two identically-named parameters (a SyntaxError)
    while the body already read ``use_temp_dir`` and ``working_dir``.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def __magic_name__(array, axes=None):
    """Framework-agnostic transpose for numpy / torch / tf / jax arrays.

    Fixes: the original had two identically-named parameters (a SyntaxError)
    while the body already read ``array`` and ``axes``.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        # torch uses .permute for an explicit axis order, .T for a full reverse.
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"""Type not supported for transpose: {type(array)}.""")
def __magic_name__(array, newshape):
    """Framework-agnostic reshape for numpy / torch / tf / jax arrays.

    Fixes: the original had two identically-named parameters (a SyntaxError).
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        # torch's reshape takes the dimensions as separate positional args.
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"""Type not supported for reshape: {type(array)}.""")
def __magic_name__(array, axis=None):
    """Framework-agnostic squeeze for numpy / torch / tf / jax arrays.

    Fixes: the original had two identically-named parameters (a SyntaxError)
    while the body already read ``array`` and ``axis``.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        # torch calls the keyword `dim`, and rejects dim=None.
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"""Type not supported for squeeze: {type(array)}.""")
def __magic_name__(array, axis):
    """Framework-agnostic expand_dims for numpy / torch / tf / jax arrays.

    Fixes: the original had two identically-named parameters (a SyntaxError).
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        # torch calls the keyword `dim`.
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"""Type not supported for expand_dims: {type(array)}.""")
def __magic_name__(array):
    """Framework-agnostic element count for numpy / torch / tf / jax arrays.

    Fixes: the parameter was mangled while the body read ``array``, and the
    error message said "expand_dims" (copy-paste from the function above).
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"""Type not supported for tensor_size: {type(array)}.""")
def __magic_name__(auto_map, repo_id):
    """Prefix every entry of an ``auto_map`` with ``repo_id`` (as
    ``"repo--value"``), skipping entries that are None or already prefixed.

    Fixes: the original had two identically-named parameters (a SyntaxError)
    and discarded the rewritten values into a throwaway local instead of
    writing them back to ``auto_map[key]``.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"""{repo_id}--{value}"""
    return auto_map
def __magic_name__(model_class):
    """Infer the framework ("tf" / "pt" / "flax") from a model class's MRO.

    Fixes: ``module`` and ``name`` were assigned to a mangled throwaway name,
    leaving the reads below undefined. The raise is on the for-loop's ``else``
    so a class only fails after the *whole* MRO has been inspected.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"""Could not infer framework from class {model_class}.""")
| 535
| 1
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _lowerCamelCase , unittest.TestCase ):
    """Tokenization tests for LayoutLM (slow + fast tokenizers).

    NOTE(review): the original block bound every class attribute and method to
    the same mangled name (so later definitions clobbered earlier ones) and
    read locals (`vocab_tokens`, `self.vocab_file`, `tokens`, ...) that were
    never assigned. Names below were restored from the TokenizerTesterMixin
    contract and the surviving reads — confirm against the upstream test file.
    """

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        """Write a toy WordPiece vocab into the per-test temp dir."""
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Instantiate a slow tokenizer from the temp vocab."""
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Input / expected-decoded text pair used by the mixin."""
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenize an accented string and check tokens and their ids."""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 1_0, 8, 9])

    def test_special_tokens_placeholder(self):
        """Intentionally empty in the original file."""
        pass
| 703
|
'''simple docstring'''
from timeit import timeit
# Map of sample strings to whether each one is a palindrome.
# Fixes: the dict was bound to the throwaway name `a`, while the assertion
# below (and the benchmark setup strings) read `test_data`.
test_data = {
    '''MALAYALAM''': True,
    '''String''': False,
    '''rotor''': True,
    '''level''': True,
    '''A''': True,
    '''BB''': True,
    '''ABC''': False,
    '''amanaplanacanalpanama''': True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def _UpperCamelCase(_a: str) -> bool:
    """Palindrome check with two pointers moving inward. O(n) time, O(1) space.

    Fixes: the pointers were assigned to a mangled throwaway name while the
    loop read `start_i`/`end_i`, and the string was read as undefined `s`.
    """
    start_i = 0
    end_i = len(_a) - 1
    while start_i < end_i:
        if _a[start_i] == _a[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def _UpperCamelCase(_a: str) -> bool:
    """Palindrome check comparing s[i] with s[n-i-1] over the first half.

    Fixes: `half` and `n` were assigned to a mangled throwaway name while the
    expression below read them, and the string was read as undefined `s`.
    """
    half = len(_a) // 2
    n = len(_a)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(_a[i] == _a[n - i - 1] for i in range(half))
def _UpperCamelCase(_a: str) -> bool:
    """Recursive palindrome check peeling matching end characters.

    Fixes: the body read the undefined names `s` and `is_palindrome_recursive`,
    and the base case `len(s) <= 2` was wrong — it reported "xabx" (whose
    2-char middle "ab" is not a palindrome) as True. Strings of length <= 1
    are the correct trivially-palindromic base case.
    """
    if len(_a) <= 1:
        return True
    if _a[0] == _a[len(_a) - 1]:
        return _UpperCamelCase(_a[1:-1])
    else:
        return False
def _UpperCamelCase(_a: str) -> bool:
    """Palindrome check via slice reversal.

    Fixes: the body read the undefined name `s` instead of the parameter.
    """
    return _a == _a[::-1]
def _UpperCamelCase(name: str) -> None:
    """Time `name` (a palindrome-checker defined in __main__) over test_data.

    Fixes: `stmt`, `setup`, `number` and `result` were assigned to a mangled
    throwaway name while later reads used the real names, and the parameter is
    now named to match the f-strings' reads.
    """
    stmt = f"""all({name}(key) is value for key, value in test_data.items())"""
    setup = f"""from __main__ import test_data, {name}"""
    number = 5_0_0_0_0_0
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"""{name:<35} finished {number:,} runs in {result:.5f} seconds""")
if __name__ == "__main__":
# NOTE(review): `is_palindrome`, `is_palindrome_recursive`, `is_palindrome_slice`,
# `is_palindrome_traversal` and `benchmark_function` are not defined under those
# names in this file as shown (every def above is named `_UpperCamelCase`) —
# confirm the intended function names before running this script directly.
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"""{key:21} {value}""")
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
| 287
| 0
|
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __snake_case(UpperCAmelCase_):  # picklable for multiprocessing
    """Return the sum of the given array (module-level so it can be pickled).

    Fixes: the body read the undefined name `x` instead of the parameter.
    """
    return UpperCAmelCase_.sum()
def __snake_case(UpperCAmelCase_):  # picklable for multiprocessing
    """Return the argument plus one (module-level so it can be pickled).

    Fixes: the body read the undefined name `i` instead of the parameter.
    """
    return UpperCAmelCase_ + 1
@dataclass
class snake_case :
    """Simple two-field record used by the asdict tests further down.

    NOTE(review): both original fields were mangled to the same name
    `_lowerCamelCase = 42`; field names x/y were restored from the
    `A(x=..., y=...)` call sites later in this file — confirm.
    """
    x: int = 42
    y: object = 42
class snake_case ( lowercase ):
"""Unit tests for map_nested / zip_dict / temporary_assignment.

NOTE(review): this class is heavily mangled — every local is bound to the
same throwaway name `lowerCamelCase_`, and the assertions pass `lowercase__`
(the base-class name) where inputs/expected values belong. It cannot run as
written; restore the distinct local names before relying on it.
"""
def snake_case ( self ):
"""map_nested should map a function over scalars, lists, dicts and nested dicts,
both sequentially and with num_proc."""
lowerCamelCase_ = {}
lowerCamelCase_ = []
lowerCamelCase_ = 1
lowerCamelCase_ = [1, 2]
lowerCamelCase_ = {"a": 1, "b": 2}
lowerCamelCase_ = {"a": [1, 2], "b": [3, 4]}
lowerCamelCase_ = {"a": {"1": 1}, "b": 2}
lowerCamelCase_ = {"a": 1, "b": 2, "c": 3, "d": 4}
# Expected outputs (inputs mapped through +1), all clobbering the same name.
lowerCamelCase_ = {}
lowerCamelCase_ = []
lowerCamelCase_ = 2
lowerCamelCase_ = [2, 3]
lowerCamelCase_ = {"a": 2, "b": 3}
lowerCamelCase_ = {"a": [2, 3], "b": [4, 5]}
lowerCamelCase_ = {"a": {"1": 2}, "b": 3}
lowerCamelCase_ = {"a": 2, "b": 3, "c": 4, "d": 5}
# NOTE(review): the arguments below were mangled to `lowercase__`.
self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
self.assertEqual(map_nested(lowercase__ , lowercase__ ) , lowercase__ )
lowerCamelCase_ = 2
self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
self.assertEqual(map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) , lowercase__ )
lowerCamelCase_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
lowerCamelCase_ = {"a": 2, "b": 0, "c": 2}
lowerCamelCase_ = {
"a": np.eye(2 ).astype(lowercase__ ),
"b": np.zeros(3 ).astype(lowercase__ ),
"c": np.ones(2 ).astype(lowercase__ ),
}
self.assertEqual(map_nested(lowercase__ , lowercase__ , map_numpy=lowercase__ ) , lowercase__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(lowercase__ , lowercase__ , map_numpy=lowercase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(lowercase__ , lowercase__ , map_numpy=lowercase__ , num_proc=lowercase__ ) , lowercase__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(lowercase__ , lowercase__ , map_numpy=lowercase__ , num_proc=lowercase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(lowercase__ ): # can't pickle a local lambda
map_nested(lambda UpperCamelCase : x + 1 , lowercase__ , num_proc=lowercase__ )
def snake_case ( self ):
"""zip_dict should pair values of equal-keyed dicts, key by key."""
lowerCamelCase_ = {"a": 1, "b": 2}
lowerCamelCase_ = {"a": 3, "b": 4}
lowerCamelCase_ = {"a": 5, "b": 6}
lowerCamelCase_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(lowercase__ , lowercase__ , lowercase__ ) ) , lowercase__ )
def snake_case ( self ):
"""temporary_assignment should restore the attribute on context exit."""
class snake_case :
"""Dummy object with a single attribute to patch."""
_lowerCamelCase = "bar"
# NOTE(review): `Foo` / `foo` are not defined under those names here — the
# inner class above was presumably `Foo` with attribute `my_attr`; confirm.
lowerCamelCase_ = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(lowercase__ , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc" , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def __snake_case(iterable_length, num_proc, expected_num_proc):
    """map_nested should only go parallel past parallel_min_length and should
    cap the worker count at the iterable length.

    Fixes: the original had three identically-named parameters (a SyntaxError;
    pytest also requires them to match the parametrize names) and the body
    read the undefined name `__A`.
    """
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool") as mock_multiprocessing_pool:
        data_struct = {f'''{i}''': i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class snake_case ( lowercase ):
"""Tests that temp_seed makes tf / torch / numpy RNG reproducible.

NOTE(review): this class is mangled — every local is bound to the same name
`lowerCamelCase_`, keyword values were replaced by `lowercase__`, and the
final assertion compares `outa - outa` (the two distinct outputs `out1`/`out2`
were collapsed to one name, making the difference trivially zero). Restore
distinct names before relying on these tests.
"""
@require_tf
def snake_case ( self ):
"""Same seed twice must match; a third unseeded draw must differ (TF)."""
import tensorflow as tf
from tensorflow.keras import layers
lowerCamelCase_ = layers.Dense(2 )
def gen_random_output():
lowerCamelCase_ = tf.random.uniform((1, 3) )
return model(lowercase__ ).numpy()
with temp_seed(42 , set_tensorflow=lowercase__ ):
lowerCamelCase_ = gen_random_output()
with temp_seed(42 , set_tensorflow=lowercase__ ):
lowerCamelCase_ = gen_random_output()
lowerCamelCase_ = gen_random_output()
np.testing.assert_equal(lowercase__ , lowercase__ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def snake_case ( self ):
"""Same seed twice must match; a third unseeded draw must differ (torch)."""
import torch
def gen_random_output():
lowerCamelCase_ = torch.nn.Linear(3 , 2 )
lowerCamelCase_ = torch.rand(1 , 3 )
return model(lowercase__ ).detach().numpy()
with temp_seed(42 , set_pytorch=lowercase__ ):
lowerCamelCase_ = gen_random_output()
with temp_seed(42 , set_pytorch=lowercase__ ):
lowerCamelCase_ = gen_random_output()
lowerCamelCase_ = gen_random_output()
np.testing.assert_equal(lowercase__ , lowercase__ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def snake_case ( self ):
"""Same seed twice must match; a third unseeded draw must differ (numpy)."""
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
lowerCamelCase_ = gen_random_output()
with temp_seed(42 ):
lowerCamelCase_ = gen_random_output()
lowerCamelCase_ = gen_random_output()
np.testing.assert_equal(lowercase__ , lowercase__ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def __snake_case(input_data):
    """NestedDataStructure should expose the wrapped data unchanged.

    Fixes: the parameter was mangled (pytest requires it to be named
    `input_data` to match the parametrize string) and the body read the
    undefined names `__A` / `output_data`.
    """
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output" , [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ] , )
def __snake_case(data, expected_output):
    """NestedDataStructure.flatten should yield leaves in traversal order.

    Fixes: the original had two identically-named parameters (a SyntaxError;
    pytest also requires them to match the parametrize names) and the body
    read the undefined names `__A` / `output`.
    """
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def __snake_case():
    """asdict should recurse through dataclasses nested in dicts/lists and
    reject a non-dataclass top-level argument.

    Fixes: locals were bound to a throwaway name while `asdict` was called on
    the undefined `__A`, and the expected exception type was mangled.
    NOTE(review): `A` is not defined under that name in this file as shown —
    presumably the @dataclass above; confirm.
    """
    input_data = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input_data) == expected_output
    input_data = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input_data) == expected_output
    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def __snake_case(text):
    """Split `text` on whitespace (top-level so it is picklable for pools).

    Fixes: the body read the undefined name `text`; the parameter is renamed
    to match (it is also invoked by keyword `text=` further down the file).
    """
    return text.split()
def __snake_case(content):
    """Yield (timestamp, content) twice, two seconds apart — used to verify
    that results are streamed as soon as they are produced.

    Fixes: the body read the undefined name `content`; the parameter is
    renamed to match (it is invoked by keyword `content=` further down).
    """
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def __snake_case():
    """iflatmap_unordered should flatten results from both pool flavors and
    stream each item immediately after it is yielded.

    Fixes: the pool/out locals were bound to a throwaway name while the body
    read `pool`/`out`, and the first pool argument was the undefined `__A`.
    NOTE(review): `_split_text` and `_aseconds_generator_of_aitems_with_timing`
    are not defined under those names in this file as shown (the helpers above
    are all named `__snake_case`) — confirm.
    """
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 675
|
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def A__ ( __A : List[str] ) ->str:
    """Return a hex digest of the given source lines, ignoring comments and
    empty lines, for use as a cache key.

    Fixes: `re.sub` was applied to the whole list instead of each line, and
    `filtered_lines` / `full_str` were assigned to a throwaway name while the
    later reads used the real names.
    """
    filtered_lines = []
    for line in __A:
        line = re.sub(r'''#.*''', '''''', line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = '''\n'''.join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode('''utf-8''')
    return shaaaa(full_bytes).hexdigest()
# get importable module names and hash for caching
# get importable module names and hash for caching
# NOTE(review): the original bound every module-level mapping to the same
# throwaway name `_lowerCamelCase` while the update/loop code below read
# `_EXTENSION_TO_MODULE` / `_MODULE_TO_EXTENSIONS`; names restored from those
# reads. The hash helper defined above is named `A__` in this file.
_PACKAGED_DATASETS_MODULES = {
    '''csv''': (csv.__name__, A__(inspect.getsource(csv).splitlines())),
    '''json''': (json.__name__, A__(inspect.getsource(json).splitlines())),
    '''pandas''': (pandas.__name__, A__(inspect.getsource(pandas).splitlines())),
    '''parquet''': (parquet.__name__, A__(inspect.getsource(parquet).splitlines())),
    '''arrow''': (arrow.__name__, A__(inspect.getsource(arrow).splitlines())),
    '''text''': (text.__name__, A__(inspect.getsource(text).splitlines())),
    '''imagefolder''': (imagefolder.__name__, A__(inspect.getsource(imagefolder).splitlines())),
    '''audiofolder''': (audiofolder.__name__, A__(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    '''.csv''': ('''csv''', {}),
    '''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
    '''.json''': ('''json''', {}),
    '''.jsonl''': ('''json''', {}),
    '''.parquet''': ('''parquet''', {}),
    '''.arrow''': ('''arrow''', {}),
    '''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
# Folder-based modules that support metadata files alongside the data.
_MODULES_SUPPORTING_METADATA = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
| 184
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
snake_case = 16
# NOTE(review): both constants are bound to the same name `snake_case`, so the
# 16 above is immediately overwritten by the 32 below. These look like two
# distinct batch-size constants (e.g. MAX_GPU_BATCH_SIZE / EVAL_BATCH_SIZE)
# whose names were lost — confirm against the upstream accelerate example.
snake_case = 32
def lowerCamelCase__(accelerator, batch_size: int = 16):
    """Build train/eval DataLoaders for GLUE MRPC tokenized with bert-base-cased.

    Fixes: the original had two identically-named parameters (a SyntaxError)
    and bound every local to a throwaway name while later code read
    `tokenizer`, `datasets`, `tokenized_datasets`, etc.
    NOTE(review): parameter names (accelerator, batch_size) and the
    truncation=True / max_length=None keywords were restored from the
    surviving reads — confirm against the upstream accelerate example.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
# NOTE(review): `snake_case` also names the batch-size constants above; this
# rebinding clobbers them. In the upstream example this line rebinds the
# dataloader factory (e.g. `get_dataloaders = mocked_dataloaders`) — confirm.
snake_case = mocked_dataloaders # noqa: F811
def lowerCamelCase__(config, args):
    """Train and evaluate BERT on GLUE MRPC using LocalSGD for parameter sync.

    Fixes: the original had two identically-named parameters (a SyntaxError)
    and bound every local to a throwaway name while later code read
    `accelerator`, `model`, `optimizer`, etc.
    NOTE(review): names restored from the surviving reads and the upstream
    accelerate LocalSGD example; `get_dataloaders` is not defined under that
    name in this file as shown — confirm.
    """
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''', eval_metric)
def lowerCamelCase__():
    """Parse CLI arguments and launch the LocalSGD training run.

    Fixes: locals were bound to a throwaway name while `parser.add_argument`
    was read, and the mangled `type=lowercase` / `default=lowercase` keywords
    were restored to str/None/int from the argument semantics.
    NOTE(review): `training_function` is not defined under that name in this
    file as shown (the trainer above is also named `lowerCamelCase__`) — confirm.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.", )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD")
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    # Was `main()`, which is undefined in this file; call the entry point above.
    lowerCamelCase__()
| 488
|
from __future__ import annotations
from cmath import sqrt
def lowerCamelCase__(a, b, c):
    """Return both roots of a*x^2 + b*x + c = 0, as floats when real,
    complex otherwise.

    Fixes: the original had three identically-named parameters (a SyntaxError)
    and collapsed both roots into a single mangled name, so the return read
    the same (undefined) `root_a` twice.

    Raises:
        ValueError: if ``a`` is zero (not a quadratic).
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    # cmath.sqrt handles a negative discriminant by returning a complex value.
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def lowerCamelCase__ ( ):
    """Demo: solve 5x^2 + 6x + 1 = 0 and print both roots."""
    # NOTE(review): ``quadratic_roots`` is the pre-obfuscation name of the
    # solver defined above — confirm against the restored module.  The
    # original also unpacked both solutions into one name and printed
    # undefined variables.
    solution_1, solution_2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(F'''The solutions are: {solution_1} and {solution_2}''' )
if __name__ == "__main__":
    # Original called undefined ``main()``; call the demo defined above.
    lowerCamelCase__()
| 488
| 1
|
'''Lazy import structure for the BARTpho tokenizer package.

The tokenizer is only exposed when sentencepiece is installed; otherwise the
import structure stays empty.
'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Import structure consumed by _LazyModule below.
# NOTE(review): the annotations ``List[Any]`` below are evaluated at runtime
# but never imported from ``typing`` — verify against the original module.
SCREAMING_SNAKE_CASE_: int ={}
try:
    # Probe the optional sentencepiece dependency.
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # sentencepiece present: register the tokenizer for lazy loading.
    SCREAMING_SNAKE_CASE_: List[Any] =['BartphoTokenizer']
if TYPE_CHECKING:
    # Static type checkers get the real import (guarded the same way).
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys
    # At runtime, replace this module with a lazy proxy.
    # NOTE(review): ``_import_structure`` is undefined — the dict above was
    # renamed by obfuscation; confirm against the upstream module.
    SCREAMING_SNAKE_CASE_: Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 78
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> "BitConfig":
    """Build a ``BitConfig`` for *snake_case_* (the timm model name), with
    ImageNet-1k id/label mappings pulled from the HF hub.

    NOTE(review): the obfuscated original assigned every local to one name,
    leaving later references (``idalabel`` etc.) undefined; distinct names
    are restored from the upstream conversion script.
    """
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    # JSON keys arrive as strings; the config expects int ids.
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    # "bit" checkpoints use weight-standardized convolutions.
    conv_layer = "std_conv" if "bit" in snake_case_ else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=10_00 , id2label=id2label , label2id=label2id , )
    return config
def lowerCAmelCase_ ( snake_case_ : str ) -> str:
    """Map a timm BiT state-dict key to its HF ``BitForImageClassification`` name.

    NOTE(review): the obfuscated original never assigned the replacements
    back to the working variable (and referenced undefined ``name``), so it
    returned its input unchanged; the rewrites are restored.
    """
    name = snake_case_
    if "stem.conv" in name:
        name = name.replace("stem.conv" , "bit.embedder.convolution" )
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    if "head.fc" in name:
        name = name.replace("head.fc" , "classifier.1" )
    if name.startswith("norm" ):
        name = "bit." + name
    # Everything that is not the embedder/classifier/final norm lives under the encoder.
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def lowerCAmelCase_ ( ) -> "Image.Image":
    """Download the standard COCO cats test image used to verify conversions.

    NOTE(review): the zero-argument original passed an undefined name to
    ``requests.get``; the URL local is restored and ``stream=True`` used so
    ``.raw`` is readable.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def lowerCAmelCase_ ( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    """Convert a timm BiT checkpoint to ``BitForImageClassification``,
    verify pixel values and logits agree, and optionally save / push.

    NOTE(review): the obfuscated original repeated one parameter name (a
    ``SyntaxError``) and collapsed every local to one name; names are
    restored from the upstream conversion script.  ``get_config``,
    ``prepare_img`` and ``rename_key`` are the pre-obfuscation names of the
    helpers defined above — confirm after the module is restored.
    """
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        # timm head weights carry trailing singleton dims; HF stores them squeezed.
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor mirroring timm's eval transform
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Logits:" , logits[0, :3] )
    print("Predicted class:" , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True )
        print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"""Pushing model {model_name} and processor to the hub""" )
        model.push_to_hub(f"""ybelkada/{model_name}""" )
        processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
    # CLI entry point for the BiT conversion script.
    # NOTE(review): obfuscation bound the parser to ``SCREAMING_SNAKE_CASE_``
    # while the calls below use ``parser``/``args`` — these names do not
    # resolve as written; confirm against the upstream script.
    SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='resnetv2_50x1_bitm',
        type=str,
        help='Name of the BiT timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether to push the model to the hub.',
    )
    SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 78
| 1
|
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = current_set.copy()
for row_index, row in enumerate(SCREAMING_SNAKE_CASE ):
A_ = row[0]
for column_index, column in enumerate(SCREAMING_SNAKE_CASE ):
if magnitude == 0:
A_ = column
continue
A_ = column / magnitude
# Subtract to cancel term
A_ = current_set[0]
A_ = [first_row]
A_ = current_set[1::]
for row in current_set:
A_ = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(SCREAMING_SNAKE_CASE )
continue
for column_index in range(len(SCREAMING_SNAKE_CASE ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(SCREAMING_SNAKE_CASE )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
A_ = final_set[0]
A_ = []
A_ = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
A_ = simplify(SCREAMING_SNAKE_CASE )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , SCREAMING_SNAKE_CASE )
A_ = resultant
return final_set
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
A_ = len(SCREAMING_SNAKE_CASE ) + 1
if any(len(SCREAMING_SNAKE_CASE ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(SCREAMING_SNAKE_CASE , (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(SCREAMING_SNAKE_CASE ) == 1:
return [equations[0][-1] / equations[0][0]]
A_ = equations.copy()
if any(0 in row for row in data_set ):
A_ = data_set.copy()
A_ = []
for row_index, row in enumerate(SCREAMING_SNAKE_CASE ):
if 0 not in row:
A_ = data_set.pop(SCREAMING_SNAKE_CASE )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0 , SCREAMING_SNAKE_CASE )
A_ = data_set.copy()
A_ = simplify(SCREAMING_SNAKE_CASE )
A_ = simplified[::-1]
A_ = []
for row in simplified:
A_ = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
A_ = row.copy()[: len(SCREAMING_SNAKE_CASE ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(SCREAMING_SNAKE_CASE ) == 0:
solutions.append(0 )
continue
A_ = temp_row[1::]
A_ = temp_row[::-1]
for column_index, column in enumerate(SCREAMING_SNAKE_CASE ):
current_solution -= column * solutions[column_index]
solutions.append(SCREAMING_SNAKE_CASE )
A_ = []
for item in solutions:
final.append(float(round(SCREAMING_SNAKE_CASE , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): the original bound this system to an obfuscated name
    # while printing ``eq``; one consistent name is restored.
    # ``solve_simultaneous`` is the pre-obfuscation name of the solver above.
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
| 563
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class _lowercase ( ProcessorMixin ):
    """Wav2Vec2 processor bundling a feature extractor (audio) and a CTC
    tokenizer (text) behind a single ``__call__``/``pad`` interface.

    NOTE(review): restored from the upstream ``Wav2Vec2Processor`` — the
    obfuscated original reused one name for both class attributes and for
    every method, and several signatures repeated a parameter name (a
    ``SyntaxError``).  The base class is the ``ProcessorMixin`` imported at
    the top of this module.
    """

    feature_extractor_class = 'Wav2Vec2FeatureExtractor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Which sub-processor bare ``__call__``/``pad`` calls route to.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load via ProcessorMixin; fall back to loading the two parts
        separately for legacy configs without a ``tokenizer_class``."""
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                F"Loading a tokenizer inside {cls.__name__} from a config that does not"
                ''' include a `tokenizer_class` attribute is deprecated and will be '''
                '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
                ''' attribute to either your `config.json` or `tokenizer_config.json` '''
                '''file to suppress this warning: ''' , FutureWarning , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        """Prepare ``audio`` (feature extractor) and/or ``text`` (tokenizer)."""
        # For backwards compatibility with as_target_processor().
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings['''input_ids''']
            return inputs

    def pad(self, *args, **kwargs):
        """Pad ``input_features`` (feature extractor) and/or ``labels`` (tokenizer)."""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('''input_features''' , None )
        labels = kwargs.pop('''labels''' , None )
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels['''input_ids''']
            return input_features

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily route bare calls to the tokenizer (deprecated)."""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 563
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a__ ( a__ , unittest.TestCase ):
    '''Fast pipeline tests for ``KandinskyVaaImgaImgPipeline`` (Kandinsky 2.2
    image-to-image) built on tiny dummy UNet/VQ models.

    NOTE(review): obfuscation artifacts throughout — the class inherits its
    own obfuscated name, every class attribute is ``lowercase__`` (only the
    last binding survives), every property/method shares one name, one
    signature repeats a parameter (a ``SyntaxError``), and method bodies
    reference undefined ``lowerCamelCase_``.  Code left byte-identical;
    restore from the upstream diffusers test module before running.
    '''
    lowercase__ : List[str] = KandinskyVaaImgaImgPipeline
    lowercase__ : Any = ["image_embeds", "negative_image_embeds", "image"]
    lowercase__ : Union[str, Any] = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    lowercase__ : List[str] = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    lowercase__ : Union[str, Any] = False
    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        # Text embedder hidden size used by the dummy models.
        return 32
    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        # Time input dimension of the dummy UNet.
        return 32
    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        return self.time_input_dim
    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        return self.time_input_dim * 4
    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        return 1_00
    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        # Tiny UNet2DConditionModel with image-embedding conditioning.
        torch.manual_seed(0 )
        lowerCAmelCase__ = {
            '''in_channels''': 4,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        lowerCAmelCase__ = UNetaDConditionModel(**lowerCamelCase_ )
        return model
    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        # Keyword arguments for the tiny VQ decoder ("movq").
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        torch.manual_seed(0 )
        lowerCAmelCase__ = VQModel(**self.dummy_movq_kwargs )
        return model
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        # Assemble pipeline components: dummy UNet, DDIM scheduler, dummy movq.
        lowerCAmelCase__ = self.dummy_unet
        lowerCAmelCase__ = self.dummy_movq
        lowerCAmelCase__ = {
            '''num_train_timesteps''': 10_00,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00_085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        lowerCAmelCase__ = DDIMScheduler(**lowerCamelCase_ )
        lowerCAmelCase__ = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> Dict:
        # Deterministic dummy inputs: random image embeds + a 256x256 RGB image.
        lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
        lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            lowerCamelCase_ )
        # create init_image
        lowerCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
        lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase__ = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('''RGB''' ).resize((2_56, 2_56) )
        if str(lowerCamelCase_ ).startswith('''mps''' ):
            lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ )
        else:
            lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
        lowerCAmelCase__ = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        # Smoke test on CPU: output slice must match the recorded reference.
        lowerCAmelCase__ = '''cpu'''
        lowerCAmelCase__ = self.get_dummy_components()
        lowerCAmelCase__ = self.pipeline_class(**lowerCamelCase_ )
        lowerCAmelCase__ = pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        lowerCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
        lowerCAmelCase__ = output.images
        lowerCAmelCase__ = pipe(
            **self.get_dummy_inputs(lowerCamelCase_ ) , return_dict=lowerCamelCase_ , )[0]
        lowerCAmelCase__ = image[0, -3:, -3:, -1]
        lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCAmelCase__ = np.array(
            [0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
    '''Slow GPU integration test: full Kandinsky 2.2 prior + img2img run
    against a reference image.

    NOTE(review): obfuscation artifacts — methods share one mangled name and
    bodies bind every local to ``lowerCAmelCase__`` while referencing the
    pre-obfuscation names; left byte-identical.
    '''
    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        # Expected output and source image for the frog img2img reference run.
        lowerCAmelCase__ = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
        lowerCAmelCase__ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        lowerCAmelCase__ = '''A red cartoon frog, 4k'''
        lowerCAmelCase__ = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
        pipe_prior.to(lowerCamelCase_ )
        lowerCAmelCase__ = KandinskyVaaImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
        lowerCAmelCase__ = pipeline.to(lowerCamelCase_ )
        pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
        lowerCAmelCase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCAmelCase__ , lowerCAmelCase__ = pipe_prior(
            lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        lowerCAmelCase__ = pipeline(
            image=lowerCamelCase_ , image_embeds=lowerCamelCase_ , negative_image_embeds=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
        lowerCAmelCase__ = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
| 90
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase_ = logging.get_logger(__name__)
def __magic_name__ ( __a : List[Any] , __a : Optional[int] , __a : Optional[int] ):
'''simple docstring'''
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def __magic_name__ ( image , lang , tesseract_config = None ):
    """Run Tesseract OCR on *image*; return ``(words, normalized_boxes)``
    with empty words filtered out and boxes scaled to 0-1000.

    NOTE(review): the obfuscated original repeated one parameter name (a
    ``SyntaxError``) and collapsed every local to one name; names restored
    from the upstream image processor.  ``normalize_box`` is the
    pre-obfuscation name of the helper above.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else """"""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type="""dict""" , config=tesseract_config )
    words, left, top, width, height = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
    # filter empty words and corresponding coordinates (set -> O(1) membership)
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_boxes.append([x, y, x + w, y + h])
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class __A( __lowerCamelCase ):
    """LayoutLM-style image processor: resizes document images, runs
    Tesseract OCR (words + normalized boxes), and flips channels RGB->BGR.

    NOTE(review): obfuscation artifacts — the base class name is undefined,
    every method signature repeats the parameter name
    ``SCREAMING_SNAKE_CASE_`` (a ``SyntaxError``), and bodies bind every
    local to one name; code left byte-identical.
    """

    SCREAMING_SNAKE_CASE__ = ["""pixel_values"""]
    def __init__(self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "" , **SCREAMING_SNAKE_CASE_ , ):
        # Stores resize/OCR defaults (do_resize, size, resample, apply_ocr,
        # ocr_lang, tesseract_config).
        super().__init__(**SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = size if size is not None else {"""height""": 2_24, """width""": 2_24}
        UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = do_resize
        UpperCamelCase__ = size
        UpperCamelCase__ = resample
        UpperCamelCase__ = apply_ocr
        UpperCamelCase__ = ocr_lang
        UpperCamelCase__ = tesseract_config
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
        # Resize one image to the (height, width) in ``size``.
        UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
        UpperCamelCase__ = (size["""height"""], size["""width"""])
        return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
        # Full preprocess: validate -> numpy -> optional OCR -> optional
        # resize -> RGB->BGR flip -> channel-first BatchFeature.
        UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
        UpperCamelCase__ = size if size is not None else self.size
        UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = resample if resample is not None else self.resample
        UpperCamelCase__ = apply_ocr if apply_ocr is not None else self.apply_ocr
        UpperCamelCase__ = ocr_lang if ocr_lang is not None else self.ocr_lang
        UpperCamelCase__ = tesseract_config if tesseract_config is not None else self.tesseract_config
        UpperCamelCase__ = make_list_of_images(SCREAMING_SNAKE_CASE_ )
        if not valid_images(SCREAMING_SNAKE_CASE_ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        # All transformations expect numpy arrays.
        UpperCamelCase__ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
        if apply_ocr:
            requires_backends(self , """pytesseract""" )
            UpperCamelCase__ = []
            UpperCamelCase__ = []
            for image in images:
                UpperCamelCase__ , UpperCamelCase__ = apply_tesseract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                words_batch.append(SCREAMING_SNAKE_CASE_ )
                boxes_batch.append(SCREAMING_SNAKE_CASE_ )
        if do_resize:
            UpperCamelCase__ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        UpperCamelCase__ = [flip_channel_order(SCREAMING_SNAKE_CASE_ ) for image in images]
        UpperCamelCase__ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
        UpperCamelCase__ = BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE_ )
        if apply_ocr:
            UpperCamelCase__ = words_batch
            UpperCamelCase__ = boxes_batch
        return data
| 513
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class lowerCamelCase__ ( TaskTemplate ):
    """Audio-classification task template: maps a dataset's audio/label
    columns onto the canonical ``audio``/``labels`` schema.

    NOTE(review): field and method names restored from the upstream
    ``datasets`` task template — the obfuscated original reused one name for
    every field (so only the last binding survived) and one name for both
    methods; ``frozen=True`` and the ``TaskTemplate`` base (imported above)
    replace undefined obfuscated names.
    """

    # `task` is serialized even though it equals the default.
    task: str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'audio': Audio()} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features( self , features ):
        """Return a deep copy whose label schema uses *features*' ClassLabel."""
        if self.label_column not in features:
            raise ValueError(F'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Bypass the frozen dataclass to install the aligned schema.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping( self ):
        """Dataset-column -> canonical-column mapping."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 185
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class lowerCamelCase__ ( PretrainedConfig ):
    """Configuration for data2vec-text models (RoBERTa-style hyperparameters).

    NOTE(review): parameter and attribute names restored from the upstream
    ``Data2VecTextConfig`` — the obfuscated original repeated one parameter
    name for all 18 ``__init__`` parameters (a ``SyntaxError``) and never
    stored any value on ``self``; the base is the ``PretrainedConfig``
    imported above.
    """

    model_type = 'data2vec-text'

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCamelCase__ ( OnnxConfig ):
    """ONNX export configuration for data2vec-text.

    NOTE(review): restored from the upstream config — the obfuscated
    original referenced undefined ``dynamic_axis`` and an undefined base;
    the base is the ``OnnxConfig`` imported above.  This class shadows the
    model config above because obfuscation gave both the same name.
    """

    @property
    def inputs( self ):
        """Dynamic-axis layout for the exported input tensors; multiple-choice
        tasks carry an extra ``choice`` axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 185
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class SCREAMING_SNAKE_CASE ( snake_case__ ):
    # Output container for the VAE encoder: a single field holding the
    # posterior ``DiagonalGaussianDistribution`` over latents.
    # NOTE(review): the field name was obfuscated (it shadows the base-class
    # reference); confirm against the upstream ``AutoencoderKLOutput``.
    snake_case__ : "DiagonalGaussianDistribution"
class SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ ):
    r"""Variational autoencoder with a KL-regularized latent space.

    Composed of an `Encoder`, a `Decoder` and two 1x1 projection convolutions
    around the latent bottleneck.  Supports sliced and tiled encoding/decoding
    so very large images can be processed in bounded memory.

    Fixes applied to the garbled original: duplicate `A__` parameter names
    (a SyntaxError), every method sharing the name `a_` (only the last would
    survive on the class), the `nn.Convad` typo, and bodies referencing
    undefined placeholder names.  Names restored per upstream diffusers.
    """

    # Lets the framework toggle gradient checkpointing on Encoder/Decoder
    # (see `_set_gradient_checkpointing`).
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder; double_z=True because the encoder
        # outputs mean and log-variance for the Gaussian posterior.
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        # 1x1 convs projecting to/from the (mean, logvar) moment space.
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        # Latent tiles are smaller by the encoder's total downsampling factor.
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        """Toggle gradient checkpointing on the sub-modules that support it."""
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        """Enable tiled VAE en/decoding for very large inputs."""
        self.use_tiling = use_tiling

    def disable_tiling(self):
        """Disable tiled VAE en/decoding."""
        self.enable_tiling(False)

    def enable_slicing(self):
        """Enable sliced VAE en/decoding (one batch element at a time)."""
        self.use_slicing = True

    def disable_slicing(self):
        """Disable sliced VAE en/decoding."""
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""Return all attention processors, indexed by their weight path."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""Set the attention processor(s); a dict must cover every layer."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    # Consume the entry matching this layer's weight path.
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Reset every attention layer to the default processor."""
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        """Encode `x` into a Gaussian posterior over latents."""
        # Tiled path keeps peak memory bounded for large inputs.
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            # Encode one batch element at a time to reduce memory usage.
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True):
        """Decode a latent batch without slicing (internal helper)."""
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True):
        """Decode latents to images, optionally one batch element at a time."""
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        """Linearly blend the bottom edge of `a` into the top edge of `b`."""
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        """Linearly blend the right edge of `a` into the left edge of `b`."""
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True):
        r"""Encode `x` tile-by-tile, blending overlapping tiles at the seams."""
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into overlapping tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)

        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True):
        r"""Decode `z` tile-by-tile, blending overlapping tiles at the seams."""
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)

        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ):
        r"""Encode then decode `sample`; sample the posterior if requested."""
        x = sample
        posterior = self.encode(x).latent_dist

        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()

        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 150
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase_ ( snake_case__ ):
    """Reader that loads a JSON / JSON-Lines dataset through the `Json`
    packaged builder, either streaming or as a map-style `Dataset`.

    Fix: the garbled original reused one name for every `__init__` parameter
    (a duplicate-argument SyntaxError) and referenced undefined locals;
    canonical names restored from the upstream `datasets` JSON reader.
    """

    def __init__(
        self,
        path_or_paths: "NestedDataStructureLike[PathLike]",
        split: "Optional[NamedSplit]" = None,
        features: "Optional[Features]" = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: "Optional[str]" = None,
        num_proc: "Optional[int]" = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # `field` selects a nested key inside each JSON document.
        self.field = field
        # Normalize a bare path into the {split: path} mapping the builder expects.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset (streaming iterable or fully prepared)."""
        # NOTE(review): the garbled original named this method `__a`; the
        # abstract reader contract calls it `read`.
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class lowerCAmelCase_ :
    """Serializes a `datasets.Dataset` to JSON (Lines), optionally in
    parallel with multiprocessing.

    Fix: the garbled original reused one name for every `__init__` parameter
    (a duplicate-argument SyntaxError) and gave all three methods the same
    name `__a` while the bodies call `self._batch_json` / `self._write`;
    canonical names restored from the upstream `datasets` JSON writer.
    """

    def __init__(
        self,
        dataset: "Dataset",
        path_or_buf: "Union[PathLike, BinaryIO]",
        batch_size: "Optional[int]" = None,
        num_proc: "Optional[int]" = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''')
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        """Write the whole dataset; returns the number of bytes written."""
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        # JSON Lines is the default for the `records` orientation.
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''')

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
                    """ was passed. Please provide a local path instead."""
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        """Render one batch of rows as encoded JSON (picklable for mp.Pool)."""
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        # Ensure every batch ends with a newline so concatenation stays valid JSON Lines.
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: "BinaryIO", orient, lines, index, **to_json_kwargs) -> int:
        """Stream batches to `file_obj`, sequentially or via a process pool."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
| 582
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import table for the InstructBLIP sub-package: maps submodule name to
# the public symbols it provides.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

# The modeling files require torch; register them only when it is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: the garbled original bound this list to a dead name instead of
    # extending `_import_structure` (which was itself never defined).
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy
    # submodules are imported only on first attribute access.  The garbled
    # original bound the proxy to a throwaway name, leaving the module empty.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
    """Builds a tiny RoFormer config plus random model inputs for the Flax tests.

    Fix: the garbled original collapsed all locals onto one name (so the
    unpack at the end referenced undefined variables) and gave both helper
    methods the same name, so the second shadowed the first; canonical names
    restored from the upstream test module.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the layout the common mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class lowerCamelCase__ ( A , unittest.TestCase ):
    """Flax RoFormer model test-suite shell (a common model-tester mixin
    combined with unittest.TestCase).

    NOTE(review): this block looks machine-garbled — the base class ``A``,
    the helper ``FlaxRoFormerModelTester`` and the name ``UpperCamelCase_``
    are not defined anywhere in this file, both class attributes share the
    name ``A_`` (the second assignment overwrites the first) and both methods
    share the name ``__UpperCAmelCase`` (only the last survives on the
    class).  Confirm against the upstream
    ``tests/models/roformer/test_modeling_flax_roformer.py`` before running.
    """

    # Presumably a boolean flag consumed by the mixin base (upstream:
    # `test_head_masking = True`) — TODO confirm; immediately overwritten below.
    A_ = True
    # All Flax RoFormer model classes exercised by the common mixin tests
    # (empty tuple when Flax is unavailable so collection doesn't fail).
    A_ = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def __UpperCAmelCase ( self : str ) -> int:
        """Build the shared model tester used by the mixin's tests."""
        # NOTE(review): the result is bound to a throwaway local; upstream
        # assigns it to ``self.model_tester`` — confirm.
        _lowercase : Tuple = FlaxRoFormerModelTester(self )
    @slow
    def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
        """Smoke-test that every model class loads from its hub checkpoint."""
        for model_class_name in self.all_model_classes:
            # NOTE(review): ``UpperCamelCase_`` and ``model`` are undefined
            # here — the garbling destroyed the original local names.
            _lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ )
            _lowercase : str = model(np.ones((1, 1) ) )
            self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration test: checks Flax RoFormer masked-LM logits against
    reference values from the original checkpoint.
    """

    @slow
    def test_inference_masked_lm(self):
        # Fix: the garbled original collapsed every local onto one name and
        # then referenced undefined identifiers (`model`, `output`,
        # `UpperCamelCase_`); locals restored and the method renamed to a
        # `test_` name so unittest actually discovers it.
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        # First 3x3 block of logits from the reference implementation.
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 4
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Holds the GLPN image-processor settings shared by the processor tests.

    Fix: the garbled original reused parameter names (duplicate-argument
    SyntaxError) and named its helper method ``A`` while sibling test classes
    call ``prepare_image_processor_dict``; the canonical name is restored and
    ``A`` kept as a backward-compatible alias.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Constructor kwargs for the GLPNImageProcessor under test."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }

    # Backward-compatible alias for the garbled original method name.
    A = prepare_image_processor_dict
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ):
    """GLPN image-processor test-suite shell.

    NOTE(review): this block looks machine-garbled — the mixin base
    ``_UpperCamelCase``, the helper ``GLPNImageProcessingTester``, the local
    name ``a_`` and the attributes ``self.image_processor_tester`` /
    ``self.image_processor_dict`` / ``self.image_processing_class`` are never
    defined in this file, and every method shares the name ``A`` (so only the
    last definition survives on the class).  Confirm against the upstream
    GLPN image-processing test module before running.
    """

    # Image-processor class under test (None when vision deps are missing);
    # the double-underscore name will be mangled by the class namespace.
    __SCREAMING_SNAKE_CASE = GLPNImageProcessor if is_vision_available() else None
    def A ( self : Any ):
        """setUp-style hook: build the shared tester fixture."""
        # NOTE(review): bound to a throwaway local; upstream assigns it to
        # ``self.image_processor_tester``.
        __snake_case = GLPNImageProcessingTester(self )
    @property
    def A ( self : List[str] ):
        """Dict of constructor kwargs for the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def A ( self : Union[str, Any] ):
        """The processor should expose its documented configuration attributes."""
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(a_ , "do_resize" ) )
        self.assertTrue(hasattr(a_ , "size_divisor" ) )
        self.assertTrue(hasattr(a_ , "resample" ) )
        self.assertTrue(hasattr(a_ , "do_rescale" ) )
    def A ( self : List[str] ):
        """Placeholder (upstream: default-size test not applicable to GLPN)."""
        pass
    def A ( self : Dict ):
        """PIL inputs: processed height/width are multiples of size_divisor."""
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , Image.Image )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        __snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def A ( self : Optional[Any] ):
        """NumPy inputs: processed height/width are multiples of size_divisor."""
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , np.ndarray )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        __snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def A ( self : List[Any] ):
        """Torch inputs: processed height/width are multiples of size_divisor."""
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , torch.Tensor )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        __snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 69
|
'''simple docstring'''
def __UpperCAmelCase ( number_of_steps: int ) -> int:
    """Count the distinct ways to climb a staircase of `number_of_steps`
    steps when each move climbs either 1 or 2 steps (Fibonacci recurrence).

    Raises:
        ValueError: if `number_of_steps` is not a positive integer.
    """
    # Fix: validate with an explicit raise instead of `assert` (asserts are
    # stripped under `python -O`), and repair the garbled names — the
    # original parameter and loop locals did not match the names the body used.
    if not isinstance(number_of_steps, int) or number_of_steps <= 0:
        raise ValueError(
            f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
        )
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        # Classic two-variable Fibonacci step.
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
    import doctest

    # Run any doctests defined in this module when executed as a script.
    doctest.testmod()
| 69
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
    """Configuration for XLM-RoBERTa models (BERT-style hyperparameters).

    Fix: the garbled original reused one name for every `__init__` parameter
    — a duplicate-argument SyntaxError that also left the body referencing
    undefined names; canonical parameter names restored.
    """

    # NOTE(review): upstream names this class attribute `model_type`; the
    # garbled identifier is kept to avoid changing the visible interface.
    snake_case__ = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are handled by the base configuration class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
    """ONNX export configuration: declares the dynamic input axes.

    NOTE(review): the base-class name looks machine-garbled; upstream this
    derives from the imported `OnnxConfig`.
    """

    @property
    def SCREAMING_SNAKE_CASE ( self : Any ) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice tasks carry an extra `choice` axis between batch
        # and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        # Fix: the garbled original assigned the axes dict to a dead local
        # and then referenced the undefined name `dynamic_axis`.
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 666
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__lowerCAmelCase = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__lowerCAmelCase = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
__lowerCAmelCase = '▁'
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict="<s>" , __SCREAMING_SNAKE_CASE : List[Any]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : List[str]="<s>" , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Tuple="<mask>" , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
a_ : Tuple = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
a_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
a_ : Tuple = vocab_file
a_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
a_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
a_ : Any = len(self.sp_model ) - 1
a_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ : List[str] = [self.cls_token_id]
a_ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE(
    self,
    token_ids_0: List[int],
    token_ids_1: Optional[List[int]] = None,
    already_has_special_tokens: bool = False,
) -> List[int]:
    """Return a mask marking special tokens (1) vs. sequence tokens (0).

    Args:
        token_ids_0: Ids of the first sequence (without special tokens).
        token_ids_1: Optional ids of the second sequence.
        already_has_special_tokens: If True, the ids already contain special
            tokens and the base-class implementation is used instead.

    Returns:
        A list of 0/1 flags matching the formatted sequence layout.
    """
    if already_has_special_tokens:
        # Delegate to the base tokenizer, which inspects the ids directly.
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0,
            token_ids_1=token_ids_1,
            already_has_special_tokens=True,
        )
    if token_ids_1 is None:
        # <s> A </s>
        return [1] + ([0] * len(token_ids_0)) + [1]
    # <s> A </s></s> B </s>
    return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def SCREAMING_SNAKE_CASE(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """Return token-type ids for one or two sequences.

    This model does not use token types, so the mask is all zeros but has the
    same length as the formatted (special-token-augmented) sequence.

    Args:
        token_ids_0: Ids of the first sequence.
        token_ids_1: Optional ids of the second sequence.

    Returns:
        A list of zeros of the appropriate length.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def SCREAMING_SNAKE_CASE(self) -> int:
    """Size of the SentencePiece vocabulary (excluding added tokens)."""
    return len(self.sp_model)
def SCREAMING_SNAKE_CASE(self) -> Dict[str, int]:
    """Return the full token→id mapping, including added tokens.

    Returns:
        A dict mapping every token string to its integer id.
    """
    vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
    # Added tokens override base-vocab entries on collision.
    vocab.update(self.added_tokens_encoder)
    return vocab
def SCREAMING_SNAKE_CASE(self, text: str) -> List[str]:
    """Tokenize ``text`` into SentencePiece string pieces.

    ``out_type=str`` makes the processor return pieces rather than ids; the
    obfuscated original passed the text itself as ``out_type``.
    """
    return self.sp_model.encode(text, out_type=str)
def SCREAMING_SNAKE_CASE(self, token: str) -> int:
    """Convert a token string to its id.

    Fairseq special tokens take priority; otherwise the SentencePiece id is
    used, falling back to the unknown-token id when the piece is not in the
    model (PieceToId returns 0 for unknown pieces).
    """
    if token in self.fairseq_tokens_to_ids:
        return self.fairseq_tokens_to_ids[token]
    spm_id = self.sp_model.PieceToId(token)
    return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE(self, index: int) -> str:
    """Convert an id to its token string.

    Fairseq special-token ids take priority; otherwise the SentencePiece
    model resolves the piece.
    """
    if index in self.fairseq_ids_to_tokens:
        return self.fairseq_ids_to_tokens[index]
    return self.sp_model.IdToPiece(index)
def SCREAMING_SNAKE_CASE(self, tokens: List[str]) -> str:
    """Join string tokens back into text.

    Special tokens must not be decoded by the SentencePiece model, so the
    token stream is decoded in runs: accumulated ordinary pieces are flushed
    through ``sp_model.decode`` whenever a special token is hit, and the
    special token is emitted verbatim. The obfuscated original appended and
    decoded the whole token list instead of the current run.
    """
    current_sub_tokens = []
    out_string = ""
    prev_is_special = False
    for token in tokens:
        if token in self.all_special_tokens:
            if not prev_is_special:
                # Single space between decoded text and a special token.
                out_string += " "
            out_string += self.sp_model.decode(current_sub_tokens) + token
            prev_is_special = True
            current_sub_tokens = []
        else:
            current_sub_tokens.append(token)
            prev_is_special = False
    # Flush any trailing run of ordinary pieces.
    out_string += self.sp_model.decode(current_sub_tokens)
    return out_string.strip()
def __getstate__(self) -> Dict[str, Any]:
    """Return picklable state.

    The SentencePieceProcessor is not picklable, so it is dropped from the
    copied ``__dict__`` and rebuilt in ``__setstate__``. The obfuscated
    original assigned ``None`` to a throwaway local instead.
    """
    state = self.__dict__.copy()
    state["sp_model"] = None
    return state
def __setstate__(self, d: Dict[str, Any]) -> None:
    """Restore pickled state and rebuild the SentencePiece processor.

    The obfuscated original assigned the restored dict and the
    ``sp_model_kwargs`` default to throwaway locals; they must be instance
    attributes for unpickling to work.
    """
    self.__dict__ = d
    # Backward compatibility with pickles created before sp_model_kwargs existed.
    if not hasattr(self, "sp_model_kwargs"):
        self.sp_model_kwargs = {}
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
    self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE(
    self, save_directory: str, filename_prefix: Optional[str] = None
) -> Tuple[str]:
    """Save the SentencePiece vocabulary file to ``save_directory``.

    Args:
        save_directory: Existing directory to write the vocab file into.
        filename_prefix: Optional prefix prepended to the vocab filename.

    Returns:
        A one-tuple with the path of the written vocab file, or ``None``
        (implicit) when ``save_directory`` is not a directory.
    """
    if not os.path.isdir(save_directory):
        logger.error(f"Vocabulary path ({save_directory}) should be a directory")
        return
    out_vocab_file = os.path.join(
        save_directory,
        (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"],
    )
    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(
        self.vocab_file
    ):
        # An on-disk model file exists and differs from the target: copy it.
        copyfile(self.vocab_file, out_vocab_file)
    elif not os.path.isfile(self.vocab_file):
        # No source file (e.g. loaded from serialized proto): dump the
        # in-memory model instead.
        with open(out_vocab_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
    return (out_vocab_file,)
| 666
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.