| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase (SCREAMING_SNAKE_CASE_ : Dict ) -> str:
SCREAMING_SNAKE_CASE = emb.weight.shape
SCREAMING_SNAKE_CASE = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
def lowercase (SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int]="facebook/mbart-large-en-ro" , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Any=False ) -> List[str]:
SCREAMING_SNAKE_CASE = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )["""model"""]
remove_ignore_keys_(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = state_dict["""encoder.embed_tokens.weight"""].shape[0]
SCREAMING_SNAKE_CASE = MBartConfig.from_pretrained(_SCREAMING_SNAKE_CASE , vocab_size=_SCREAMING_SNAKE_CASE )
if mbart_aa and finetuned:
SCREAMING_SNAKE_CASE = """relu"""
SCREAMING_SNAKE_CASE = state_dict["""decoder.embed_tokens.weight"""]
SCREAMING_SNAKE_CASE = MBartForConditionalGeneration(_SCREAMING_SNAKE_CASE )
model.model.load_state_dict(_SCREAMING_SNAKE_CASE )
if finetuned:
SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
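# A minimal, illustrative invocation of the converter above (the script filename and
# local paths here are assumptions, not taken from the snippet itself):
#
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned
#
# The dumped folder can then be reloaded with
# `MBartForConditionalGeneration.from_pretrained("./mbart-hf")`.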
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(self, vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
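# Minimal usage sketch via the public API (illustrative, not part of the module above):
#
#   from transformers import MegatronBertConfig
#
#   config = MegatronBertConfig(hidden_size=1024, num_attention_heads=16)
#   config.save_pretrained("./megatron-bert")  # writes ./megatron-bert/config.json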
"""RoBERTa-PreLayerNorm model configuration"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
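# Illustrative reading of the ONNX input spec above: for the default task the property
# resolves (hand-traced, not executed) to
#
#   OrderedDict([
#       ("input_ids",      {0: "batch", 1: "sequence"}),
#       ("attention_mask", {0: "batch", 1: "sequence"}),
#   ])
#
# so batch and sequence lengths stay dynamic in the exported graph, and the
# "multiple-choice" task adds a `choice` axis at position 1.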
"""TF LED model tests."""
from __future__ import annotations

import unittest

from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFLEDForConditionalGeneration, TFLEDModel


@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` tokens and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }


@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # head masking is not tested for LED
        pass


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4


@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
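# Worked example of the encoder padding arithmetic in TFLEDModelTester above:
# with seq_length = 7 and attention_window = 4,
#   encoder_seq_length = 7 + (4 - 7 % 4) % 4 = 7 + 1 = 8
# i.e. encoder inputs are padded up to the next multiple of the attention window.
assert 7 + (4 - 7 % 4) % 4 == 8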
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
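# For intuition: PolynomialFeatures(degree=4) expands each position level x into
# [1, x, x**2, x**3, x**4] before the linear fit. A tiny self-contained check:
#
#   from sklearn.preprocessing import PolynomialFeatures
#   PolynomialFeatures(degree=4).fit_transform([[2.0]])
#   # -> [[ 1.  2.  4.  8. 16.]]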
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
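# Usage sketch: modules that need a pinned optional dependency call the helper with a
# key from the deps table, e.g.
#
#   dep_version_check("tokenizers")
#
# which raises if the installed version violates the pin in dependency_versions_table.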
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # compare two TensorProtos while ignoring their names
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
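# Usage sketch for the deduplication pass above (path is hypothetical):
#
#   optimized_path = remove_dup_initializers("model/encoder.onnx")
#   # -> writes model/optimized_encoder.onnx and returns that path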
from __future__ import annotations

import inspect
import unittest
from typing import List, Tuple

from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
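# Note: the integration test above is gated behind `@slow`; in the transformers test
# suite such tests only run when the RUN_SLOW environment variable is set, e.g.
#
#   RUN_SLOW=1 pytest tests/models/regnet/test_modeling_tf_regnet.py
#
# (file path assumed from the imports above).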
# pip requirement pins consumed by `dependency_versions_check` above
deps = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
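# Usage sketch: each entry above is a full pip requirement string, so callers can both
# look up and enforce a pin, e.g. (helper names taken from dependency_versions_check above):
#
#   from .utils.versions import require_version
#   require_version(deps["tokenizers"])  # raises if the installed version violates the pin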
"""BART model configuration"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )


class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
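# Illustrative usage sketch for the ONNX config above (module path assumed; the exact
# export entry point varies across transformers versions):
#
#   from transformers import BartConfig
#   from transformers.models.bart.configuration_bart import BartOnnxConfig
#
#   onnx_config = BartOnnxConfig(BartConfig(), task="seq2seq-lm")
#   print(onnx_config.inputs)  # dynamic-axis spec handed to the ONNX exporter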
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # map deprecated `no_xxx` kwargs onto their positive counterparts
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
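# Minimal usage sketch via the public API (field names inherited from
# BenchmarkArguments; illustrative, not part of the module above):
#
#   from transformers import PyTorchBenchmarkArguments
#
#   args = PyTorchBenchmarkArguments(
#       models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#   )
#   print(args.device, args.n_gpu)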
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = WavaVecaPhonemeCTCTokenizer
A__ = False
def A_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
__snake_case : Optional[Any] = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(' ' )
__snake_case : Tuple = dict(zip(__a , range(len(__a ) ) ) )
__snake_case : List[str] = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}
__snake_case : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt

        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)
    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")
    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_phonemes_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")
    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)

        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")
    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)
    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
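# Round-trip tests for the `datasets` Parquet reader/writer: plain datasets,
# DatasetDict splits, feature casting, and writer row-group sizing.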
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
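# get_writer_batch_size is expected to pick a smaller Parquet row-group size for
# media-heavy features (images/audio) so readers can stream them in small chunks.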
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
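# Log at DEBUG so output from the spawned TPU worker processes is visible in test logs.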
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
"""simple docstring"""
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)
    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
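# enable_full_determinism() switches PyTorch to deterministic kernels so the fast
# pipeline tests below produce reproducible outputs across runs.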
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latent_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        # NOTE: boolean/flag values below follow the upstream diffusers StableUnCLIP fast test.
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 135: count values of n below `limit` with exactly ten
    positive-integer solutions of x**2 - y**2 - z**2 == n."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            common_difference /= 4
            if first_term > common_difference and first_term < 4 * common_difference:
                # x, y, z are positive integers, so we need a > d and a < 4 * d
                frequency[n] += 1
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f"""{solution() = }""")
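# The sieve-like double loop performs sum(limit / a) ~ limit * ln(limit) inner
# iterations (a harmonic sum), which is fast enough for the default one-million limit.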
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
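# Tests for Accelerator state checkpointing: save/load round-trips, checkpoint
# rotation via `total_limit`, scheduler state, and optimizer `map_location` handling.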
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model computing y = a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; with total_limit=2 only the two most recent checkpoints survive:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
@require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
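# Everything below runs inside the subprocess launched by the @require_cuda test
# above (one process per visible GPU under torchrun).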
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = """/tmp/accelerate/state_checkpointing"""
SCREAMING_SNAKE_CASE__ = DummyModel()
SCREAMING_SNAKE_CASE__ = torch.optim.Adam(params=model.parameters(), lr=1E-3)
SCREAMING_SNAKE_CASE__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = dummy_dataloaders()
SCREAMING_SNAKE_CASE__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
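# Tests for MarianTokenizer: vocab handling, en-de equivalence, padding/truncation
# behaviour, and the separate source/target vocabulary variant.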
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase = {"input_ids": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_separate_vocabs(self) -> None:
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 34
|
import math
def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f"{solution() = }")
| 563
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 141
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
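# A minimal usage sketch (illustrative, not part of the original file): pairing
# this processor with a pretrained OwlViT checkpoint for zero-shot text queries.
# The checkpoint name and the image file are placeholders.
#
# from PIL import Image
# from transformers import OwlViTProcessor
#
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# image = Image.open("cats.png")
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
# inputs.keys()  # dict_keys(['input_ids', 'attention_mask', 'pixel_values'])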
| 141
| 1
|
"""simple docstring"""
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encode `data` to a base64-encoded bytes object."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decode a base64-encoded string or bytes-like object back to bytes."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
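# Illustrative round-trip check (added as a sketch, not in the original file):
# encoding then decoding should reproduce the input bytes exactly.
assert base64_decode(base64_encode(b"Hello World!")) == b"Hello World!"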
| 103
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
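# A toy subclass (purely illustrative; the command name and wiring are made up)
# showing how the two abstract hooks fit together. `parser` here is assumed to
# be the object returned by `ArgumentParser.add_subparsers()`.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        echo_parser = parser.add_parser("echo", help="Print a message and exit.")
        echo_parser.add_argument("message", type=str)
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message: str):
        self.message = message

    def run(self):
        print(self.message)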
| 103
| 1
|
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4

    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30

    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 172
|
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
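# Illustrative check (added as a sketch): the sort happens in place.
_sample = [5, 3, 1, 4, 2]
rec_insertion_sort(_sample, len(_sample))
assert _sample == [1, 2, 3, 4, 5]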
| 172
| 1
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
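# A minimal inference sketch (illustrative; the checkpoint path and patience
# value are placeholders): PABEE exits early once `patience` consecutive
# internal classifiers agree on the prediction.
#
# model = BertForSequenceClassificationWithPabee.from_pretrained("path/to/finetuned-checkpoint")
# model.eval()
# model.bert.set_patience(3)
# with torch.no_grad():
#     logits = model(input_ids=batch_input_ids, attention_mask=batch_attention_mask)[0]
# model.bert.log_stats()  # prints the average number of layers actually used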
| 278
|
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar coordinates into its [x, y] components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check whether a system of forces is in static equilibrium (net moment close to zero)."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )

    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])

    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
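# Illustrative component check (added as a sketch): a 10 N force at 90 degrees
# is purely vertical, up to floating-point error in cos(pi/2).
_fx, _fy = polar_force(10, 90)
assert abs(_fx) < 1e-9 and abs(_fy - 10) < 1e-9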
| 530
| 0
|
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare array[index1] and array[index2]; swap them if they violate `direction`."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence into sorted order."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low : low + length]; `length` must be a power of two."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
| 323
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
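# A minimal usage sketch (illustrative; the input array shape is an arbitrary
# example): preprocessing a dummy RGB image into a batched pixel-value tensor.
#
# dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
# image_processor = CLIPImageProcessor()
# pixel_values = image_processor(images=dummy, return_tensors="np")["pixel_values"]
# pixel_values.shape  # (1, 3, 224, 224) with the default size/crop settings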
| 376
|
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement the hyperbolic tangent, tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
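# Identity check added as an illustration: 2 / (1 + e^(-2x)) - 1 equals tanh(x).
assert np.allclose(tangent_hyperbolic(np.array([-1.0, 0.0, 2.0])), np.tanh([-1.0, 0.0, 2.0]))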
| 376
| 1
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)

    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data


@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
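# A tiny illustration (added as a sketch, not part of the original test suite):
# map_nested applies a function to every leaf of a nested python structure, e.g.
#   map_nested(add_one, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}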
| 720
|
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate a polynomial at x0 using Neville's method.
    Returns the approximated value and the full table of intermediate values.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
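# Example added as an illustration: the points (1,6),(2,7),(3,8),(4,9),(6,11)
# lie on the line y = x + 5, so the interpolated value at x0 = 5 is 10.
assert abs(neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] - 10) < 1e-9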
| 549
| 0
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.

In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]

On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 108
|
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 432
| 0
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class UpperCAmelCase_ ( A ):
'''simple docstring'''
@add_start_docstrings(a )
def __call__( self : int , a : torch.LongTensor , a : torch.FloatTensor , **a : Tuple ) -> Optional[Any]:
raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class UpperCAmelCase_ ( A ):
'''simple docstring'''
def __init__( self : int , a : int , a : Optional[int] = None ) -> Dict:
SCREAMING_SNAKE_CASE = max_length
SCREAMING_SNAKE_CASE = max_position_embeddings
@add_start_docstrings(a )
def __call__( self : Dict , a : torch.LongTensor , a : torch.FloatTensor , **a : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE = input_ids.shape[-1]
SCREAMING_SNAKE_CASE = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"""This is a friendly reminder - the current text generation call will exceed the model\'s predefined """
f"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
"""exceptions, performance degradation, or nothing at all.""" )
return is_done
class UpperCAmelCase_ ( A ):
'''simple docstring'''
def __init__( self : List[Any] , a : int , a : int ) -> str:
warnings.warn(
"""The class `MaxNewTokensCriteria` is deprecated. """
f"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
"""with `max_length = start_length + max_new_tokens` instead.""" , a , )
SCREAMING_SNAKE_CASE = start_length
SCREAMING_SNAKE_CASE = max_new_tokens
SCREAMING_SNAKE_CASE = start_length + max_new_tokens
@add_start_docstrings(a )
def __call__( self : Any , a : torch.LongTensor , a : torch.FloatTensor , **a : str ) -> int:
return input_ids.shape[-1] >= self.max_length
class UpperCAmelCase_ ( A ):
'''simple docstring'''
def __init__( self : Union[str, Any] , a : float , a : Optional[float] = None ) -> Any:
SCREAMING_SNAKE_CASE = max_time
SCREAMING_SNAKE_CASE = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(a )
def __call__( self : Optional[Any] , a : torch.LongTensor , a : torch.FloatTensor , **a : str ) -> Union[str, Any]:
return time.time() - self.initial_timestamp > self.max_time
class UpperCAmelCase_ ( A ):
'''simple docstring'''
@add_start_docstrings(a )
def __call__( self : str , a : torch.LongTensor , a : torch.FloatTensor , **a : List[Any] ) -> Optional[Any]:
return any(criteria(a , a ) for criteria in self )
@property
def _UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
for stopping_criterium in self:
if isinstance(a , a ):
return stopping_criterium.max_length
elif isinstance(a , a ):
return stopping_criterium.max_length
return None
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = stopping_criteria.max_length
SCREAMING_SNAKE_CASE = deepcopy(SCREAMING_SNAKE_CASE_ )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" , SCREAMING_SNAKE_CASE_ )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=SCREAMING_SNAKE_CASE_ ) )
return new_stopping_criteria
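# Illustrative usage sketch (not part of the original module). It uses the
# canonical `transformers` spellings of the criteria classes defined above;
# "gpt2" is just a small placeholder checkpoint.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from transformers.generation import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tokenizer("Hello, world", return_tensors="pt")
    # Stop at 20 total tokens or after 5 seconds, whichever triggers first.
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
    generated = model.generate(**inputs, stopping_criteria=criteria, do_sample=False)
    print(tokenizer.decode(generated[0], skip_special_tokens=True))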
| 703
|
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
__A : Dict = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
__A : Tuple = typing.Union[np.floataa, int, float] # noqa: UP007
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return np.sqrt(np.sum((np.asarray(SCREAMING_SNAKE_CASE ) - np.asarray(SCREAMING_SNAKE_CASE )) ** 2 ) )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    return sum((va - vb) ** 2 for va, vb in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ** (1 / 2)
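# Worked example: for [1, 2, 3] and [4, 5, 6] each squared difference is 9,
# so both functions return sqrt(9 + 9 + 9) = sqrt(27) ≈ 5.196.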
if __name__ == "__main__":
def lowerCamelCase_ ( ):
'''simple docstring'''
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=1_00_00 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=1_00_00 , globals=globals() , ) )
benchmark()
| 450
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
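# Note on the training loop below: `accelerator.accumulate` decides how often
# `optimizer.step()` actually runs, while `local_sgd.step()` decides how often
# parameters are averaged across workers; the two cadences are independent knobs.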
UpperCamelCase = 16
UpperCamelCase = 32
def _A ( lowerCAmelCase_ : Accelerator , lowerCAmelCase_ : int = 16 ):
"""simple docstring"""
lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-cased" )
lowerCAmelCase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(lowerCAmelCase_ : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase__ = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowerCAmelCase_ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase__ = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase__ = 8
else:
lowerCAmelCase__ = None
return tokenizer.pad(
lowerCAmelCase_ , padding="longest" , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors="pt" , )
# Instantiate dataloaders.
lowerCAmelCase__ = DataLoader(
tokenized_datasets["train"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
lowerCAmelCase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCamelCase = mocked_dataloaders # noqa: F811
def _A ( lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCAmelCase_ ) == "1":
lowerCAmelCase__ = 2
# New Code #
lowerCAmelCase__ = int(args.gradient_accumulation_steps )
lowerCAmelCase__ = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowerCAmelCase_ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ = config["lr"]
lowerCAmelCase__ = int(config["num_epochs"] )
lowerCAmelCase__ = int(config["seed"] )
lowerCAmelCase__ = int(config["batch_size"] )
lowerCAmelCase__ = evaluate.load("glue" , "mrpc" )
set_seed(lowerCAmelCase_ )
lowerCAmelCase__ , lowerCAmelCase__ = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase__ = AdamW(params=model.parameters() , lr=lowerCAmelCase_ )
# Instantiate scheduler
lowerCAmelCase__ = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
with LocalSGD(
accelerator=lowerCAmelCase_ , model=lowerCAmelCase_ , local_sgd_steps=lowerCAmelCase_ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
                # New Code #
# We use the new `accumulate` context manager to perform gradient accumulation
                # We also do not currently support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowerCAmelCase_ ):
lowerCAmelCase__ = model(**lowerCAmelCase_ )
lowerCAmelCase__ = output.loss
accelerator.backward(lowerCAmelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCAmelCase_ )
lowerCAmelCase__ = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , )
lowerCAmelCase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCAmelCase_ )
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=lowerCAmelCase_ , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument(
"--local_sgd_steps" , type=lowerCAmelCase_ , default=8 , help="Number of local SGD steps or None to disable local SGD" )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
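# How to run (illustrative; assumes this script is saved as local_sgd.py):
#   python local_sgd.py --gradient_accumulation_steps 2 --local_sgd_steps 8
# or, for multi-GPU, via the launcher (after running `accelerate config`):
#   accelerate launch local_sgd.py --mixed_precision fp16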
| 61
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple=13 , SCREAMING_SNAKE_CASE__ : Optional[Any]=10 , SCREAMING_SNAKE_CASE__ : Optional[int]=3 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any=32 , SCREAMING_SNAKE_CASE__ : Optional[int]=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : List[Any]=37 , SCREAMING_SNAKE_CASE__ : int="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Any=10 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Tuple="divided_space_time" , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> List[str]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_frames
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = attention_type
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = scope
lowerCAmelCase__ = num_labels
        # in TimeSformer, the total number of tokens equals num_frames * num_patches_per_frame + 1 (CLS token)
lowerCAmelCase__ = (image_size // patch_size) ** 2
lowerCAmelCase__ = (num_frames) * self.num_patches_per_frame + 1
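        # Sanity check on the tester defaults: (10 // 2) ** 2 = 25 patches per
        # frame, and 2 frames * 25 + 1 CLS token = 51 tokens in total.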
def a ( self : int ) -> Tuple:
lowerCAmelCase__ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def a ( self : List[Any] ) -> Any:
lowerCAmelCase__ = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowerCAmelCase__ = self.num_labels
return config
def a ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
lowerCAmelCase__ = TimesformerModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
lowerCAmelCase__ = TimesformerForVideoClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
# verify the logits shape
lowerCAmelCase__ = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple ) -> Dict:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case__ = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def a ( self : List[str] ) -> List[Any]:
lowerCAmelCase__ = TimesformerModelTester(self )
lowerCAmelCase__ = ConfigTester(
self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> str:
lowerCAmelCase__ = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
return inputs_dict
def a ( self : Optional[Any] ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def a ( self : Union[str, Any] ) -> Tuple:
pass
def a ( self : Dict ) -> List[str]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def a ( self : int ) -> Optional[Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def a ( self : int ) -> Optional[Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[Any] ) -> Tuple:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def a ( self : str ) -> Tuple:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TimesformerModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def a ( self : int ) -> Dict:
if not self.has_attentions:
pass
else:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
for model_class in self.all_model_classes:
lowerCAmelCase__ = self.model_tester.seq_length
lowerCAmelCase__ = self.model_tester.num_frames
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowerCAmelCase__ = len(SCREAMING_SNAKE_CASE__ )
# Check attention is always last and order is fine
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def a ( self : List[str] ) -> Any:
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ):
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = outputs.hidden_states
lowerCAmelCase__ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
lowerCAmelCase__ = np.load(lowerCAmelCase_ )
return list(lowerCAmelCase_ )
@require_torch
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def a ( self : Optional[Any] ) -> Union[str, Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def a ( self : Optional[Any] ) -> str:
lowerCAmelCase__ = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_video()
lowerCAmelCase__ = image_processor(video[:8] , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
lowerCAmelCase__ = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
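        # Note: the (1, 400) logits shape corresponds to the 400 action classes of
        # Kinetics-400, which the "-finetuned-k400" checkpoint targets.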
| 61
| 1
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[Any] = vocab_size
A__ : int = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : str = intermediate_size
A__ : List[str] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : List[str] = initializer_range
A__ : List[Any] = layer_norm_eps
A__ : int = position_embedding_type
A__ : Optional[Any] = use_cache
A__ : Optional[int] = emb_layer_norm_before
A__ : List[str] = token_dropout
A__ : Tuple = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
A__ : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = EsmFoldConfig(**UpperCamelCase__ )
A__ : int = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
A__ : Any = get_default_vocab_list()
else:
A__ : Dict = vocab_list
else:
A__ : Optional[Any] = None
A__ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __snake_case ( self ):
A__ : Optional[int] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = 0
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.trunk is None:
A__ : Tuple = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A__ : List[Any] = TrunkConfig(**self.trunk )
def __snake_case ( self ):
A__ : Optional[int] = asdict(self )
A__ : int = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 48
_lowerCAmelCase = 1_024
_lowerCAmelCase = 128
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = False
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.structure_module is None:
A__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A__ : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
        A__ : Tuple = self.sequence_state_dim // self.sequence_head_width
        A__ : int = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'''
                F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'''
                F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __snake_case ( self ):
A__ : List[Any] = asdict(self )
A__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 384
_lowerCAmelCase = 128
_lowerCAmelCase = 16
_lowerCAmelCase = 128
_lowerCAmelCase = 12
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 0.1
_lowerCAmelCase = 8
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 7
_lowerCAmelCase = 10
_lowerCAmelCase = 1e-8
_lowerCAmelCase = 1e5
def __snake_case ( self ):
return asdict(self )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
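# Illustrative usage sketch (an addition): the obfuscated classes above correspond
# to `EsmConfig` / `EsmFoldConfig` in `transformers`; this just exercises to_dict().
if __name__ == "__main__":
    from transformers import EsmConfig

    config = EsmConfig(is_folding_model=True)  # falls back to a default EsmFoldConfig
    assert isinstance(config.to_dict()["esmfold_config"], dict)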
| 55
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE : List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
_SCREAMING_SNAKE_CASE : int = get_tests_dir('fixtures/vocab.json')
_SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures')
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def __snake_case ( self ):
A__ : List[Any] = 0
def __snake_case ( self ):
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[Any] = WavaVecaConfig()
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
A__ : Any = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Dict = WavaVecaFeatureExtractor()
A__ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : Optional[int] = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : str = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : Optional[int] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[int] = WavaVecaFeatureExtractor()
A__ : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : str = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : List[Any] = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(UpperCamelCase__ )
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
            # create an empty sample processor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write('''{}''' )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__ ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
A__ : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
A__ : int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A__ : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A__ : List[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A__ : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
A__ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __snake_case ( self ):
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : str = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : str = CustomTokenizer(UpperCamelCase__ )
A__ : Optional[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "AutoFeatureExtractor"
_lowerCAmelCase = "AutoTokenizer"
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local classes.
A__ : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A__ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A__ : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
A__ : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __snake_case ( self ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __snake_case ( cls ):
A__ : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def __snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __snake_case ( self ):
A__ : Optional[Any] = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
A__ : List[Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
A__ : int = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor-org''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
A__ : List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : List[Any] = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : Union[str, Any] = CustomTokenizer(UpperCamelCase__ )
A__ : List[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
A__ : Union[str, Any] = Repository(UpperCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(UpperCamelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) ) as f:
A__ : Optional[int] = json.load(UpperCamelCase__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A__ : Tuple = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
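        # Illustrative sketch of the registration pattern these tests exercise,
        # with hypothetical MyConfig / MyProcessor placeholders:
        #
        #     AutoConfig.register("my-model", MyConfig)   # MyConfig.model_type == "my-model"
        #     AutoProcessor.register(MyConfig, MyProcessor)
        #     processor = AutoProcessor.from_pretrained(saved_dir)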
| 55
| 1
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
A_ = logging.getLogger(__name__)
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
    # save the model to output_dir, removing any stale config/weights files first
if os.path.exists(lowerCAmelCase__ ):
if os.path.exists(os.path.join(lowerCAmelCase__ ,'''config.json''' ) ) and os.path.isfile(
os.path.join(lowerCAmelCase__ ,'''config.json''' ) ):
os.remove(os.path.join(lowerCAmelCase__ ,'''config.json''' ) )
if os.path.exists(os.path.join(lowerCAmelCase__ ,'''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(lowerCAmelCase__ ,'''pytorch_model.bin''' ) ):
os.remove(os.path.join(lowerCAmelCase__ ,'''pytorch_model.bin''' ) )
else:
os.makedirs(lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=False ):
lowerCamelCase_ = 2
if unlogit:
lowerCamelCase_ = torch.pow(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = p * torch.log(lowerCAmelCase__ )
    plogp[p == 0] = 0  # define 0 * log(0) = 0 so zero probabilities contribute no entropy
return -plogp.sum(dim=-1 )
def lowercase ( lowerCAmelCase__ ):
logger.info('''lv, h >\t''' + '''\t'''.join(f"{x + 1}" for x in range(len(lowerCAmelCase__ ) ) ) )
for row in range(len(lowerCAmelCase__ ) ):
if tensor.dtype != torch.long:
logger.info(f"layer {row + 1}:\t" + '''\t'''.join(f"{x:.5f}" for x in tensor[row].cpu().data ) )
else:
logger.info(f"layer {row + 1}:\t" + '''\t'''.join(f"{x:d}" for x in tensor[row].cpu().data ) )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ,lowerCAmelCase__=True ,lowerCAmelCase__=None ,lowerCAmelCase__=False ):
lowerCamelCase_ , lowerCamelCase_ = model.config.num_hidden_layers, model.config.num_attention_heads
lowerCamelCase_ = torch.zeros(lowerCAmelCase__ ,lowerCAmelCase__ ).to(args.device )
lowerCamelCase_ = torch.zeros(lowerCAmelCase__ ,lowerCAmelCase__ ).to(args.device )
if head_mask is None:
lowerCamelCase_ = torch.ones(lowerCAmelCase__ ,lowerCAmelCase__ ).to(args.device )
head_mask.requires_grad_(requires_grad=lowerCAmelCase__ )
    # If attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
lowerCamelCase_ = None
lowerCamelCase_ = 0.0
lowerCamelCase_ = 0.0
for step, inputs in enumerate(tqdm(lowerCAmelCase__ ,desc='''Iteration''' ,disable=args.local_rank not in [-1, 0] ) ):
lowerCamelCase_ = tuple(t.to(args.device ) for t in inputs )
((lowerCamelCase_) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
lowerCamelCase_ = model(lowerCAmelCase__ ,labels=lowerCAmelCase__ ,head_mask=lowerCAmelCase__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(lowerCAmelCase__ ):
lowerCamelCase_ = entropy(attn.detach() ,lowerCAmelCase__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(lowerCAmelCase__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
lowerCamelCase_ = 2
lowerCamelCase_ = torch.pow(torch.pow(lowerCAmelCase__ ,lowerCAmelCase__ ).sum(-1 ) ,1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
lowerCamelCase_ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(lowerCAmelCase__ )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(lowerCAmelCase__ )
logger.info('''Head ranked by importance scores''' )
    lowerCamelCase_ = torch.zeros(head_importance.numel() ,dtype=torch.long ,device=args.device )
    # rank heads from most to least important
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() ,device=args.device )
    lowerCamelCase_ = head_ranks.view_as(lowerCAmelCase__ )
print_ad_tensor(lowerCAmelCase__ )
return attn_entropy, head_importance, total_loss
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = compute_heads_importance(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,compute_entropy=lowerCAmelCase__ )
    lowerCamelCase_ = 1 / loss # use 1 / LM loss in place of a downstream task score
logger.info('''Pruning: original score: %f, threshold: %f''' ,lowerCAmelCase__ ,original_score * args.masking_threshold )
lowerCamelCase_ = torch.ones_like(lowerCAmelCase__ )
lowerCamelCase_ = max(1 ,int(new_head_mask.numel() * args.masking_amount ) )
lowerCamelCase_ = original_score
while current_score >= original_score * args.masking_threshold:
lowerCamelCase_ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('''Inf''' )
lowerCamelCase_ = head_importance.view(-1 ).sort()[1]
if len(lowerCAmelCase__ ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
lowerCamelCase_ = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' ,str(current_heads_to_mask.tolist() ) )
lowerCamelCase_ = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
lowerCamelCase_ = new_head_mask.view_as(lowerCAmelCase__ )
lowerCamelCase_ = new_head_mask.clone().detach()
print_ad_tensor(lowerCAmelCase__ )
# Compute metric and head importance again
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = compute_heads_importance(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,compute_entropy=lowerCAmelCase__ ,head_mask=lowerCAmelCase__ )
lowerCamelCase_ = 1 / loss
logger.info(
        '''Masking: current score: %f, remaining heads %d (%.1f percent)''' ,lowerCAmelCase__ ,new_head_mask.sum() ,new_head_mask.sum() / new_head_mask.numel() * 100 ,)
logger.info('''Final head mask''' )
print_ad_tensor(lowerCAmelCase__ )
np.save(os.path.join(args.output_dir ,'''head_mask.npy''' ) ,head_mask.detach().cpu().numpy() )
return head_mask
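# Note: mask_heads above greedily zeroes a fixed fraction of the remaining heads
# per iteration until the 1/loss score falls below masking_threshold * original
# score, then persists the final mask to head_mask.npy in the output directory.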
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = datetime.now()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = compute_heads_importance(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,compute_entropy=lowerCAmelCase__ ,compute_importance=lowerCAmelCase__ ,head_mask=lowerCAmelCase__ )
lowerCamelCase_ = 1 / loss
lowerCamelCase_ = datetime.now() - before_time
lowerCamelCase_ = sum(p.numel() for p in model.parameters() )
lowerCamelCase_ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCAmelCase__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [
v,
]
assert sum(len(lowerCAmelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(lowerCAmelCase__ )
lowerCamelCase_ = sum(p.numel() for p in model.parameters() )
lowerCamelCase_ = datetime.now()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = compute_heads_importance(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,compute_entropy=lowerCAmelCase__ ,compute_importance=lowerCAmelCase__ ,head_mask=lowerCAmelCase__ ,actually_pruned=lowerCAmelCase__ ,)
lowerCamelCase_ = 1 / loss
lowerCamelCase_ = datetime.now() - before_time
logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)''' ,lowerCAmelCase__ ,lowerCAmelCase__ ,pruned_num_params / original_num_params * 100 ,)
logger.info('''Pruning: score with masking: %f score with pruning: %f''' ,lowerCAmelCase__ ,lowerCAmelCase__ )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percent''' ,original_time / new_time * 100 )
save_model(lowerCAmelCase__ ,args.output_dir )
def lowercase ( ):
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' ,default=lowerCAmelCase__ ,type=lowerCAmelCase__ ,required=lowerCAmelCase__ ,help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' ,)
parser.add_argument(
'''--model_name_or_path''' ,default=lowerCAmelCase__ ,type=lowerCAmelCase__ ,required=lowerCAmelCase__ ,help='''Path to pretrained model or model identifier from huggingface.co/models''' ,)
parser.add_argument(
'''--output_dir''' ,default=lowerCAmelCase__ ,type=lowerCAmelCase__ ,required=lowerCAmelCase__ ,help='''The output directory where the model predictions and checkpoints will be written.''' ,)
# Other parameters
parser.add_argument(
'''--config_name''' ,default='''''' ,type=lowerCAmelCase__ ,help='''Pretrained config name or path if not the same as model_name_or_path''' ,)
parser.add_argument(
'''--tokenizer_name''' ,default='''''' ,type=lowerCAmelCase__ ,help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' ,)
parser.add_argument(
'''--cache_dir''' ,default=lowerCAmelCase__ ,type=lowerCAmelCase__ ,help='''Where do you want to store the pre-trained models downloaded from s3''' ,)
parser.add_argument(
'''--data_subset''' ,type=lowerCAmelCase__ ,default=-1 ,help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' ,action='''store_true''' ,help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' ,action='''store_true''' ,help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' ,action='''store_true''' ,help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' ,action='''store_true''' ,help='''Don\'t normalize all importance scores between 0 and 1''' ,)
parser.add_argument(
'''--try_masking''' ,action='''store_true''' ,help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
        '''--masking_threshold''' ,default=0.9 ,type=lowerCAmelCase__ ,help='''masking threshold in terms of the metric (stop masking when metric < threshold * original metric value).''' ,)
parser.add_argument(
        '''--masking_amount''' ,default=0.1 ,type=lowerCAmelCase__ ,help='''Fraction of heads to mask at each masking step.''' )
parser.add_argument('''--metric_name''' ,default='''acc''' ,type=lowerCAmelCase__ ,help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' ,default=128 ,type=lowerCAmelCase__ ,help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) ,)
parser.add_argument('''--batch_size''' ,default=1 ,type=lowerCAmelCase__ ,help='''Batch size.''' )
parser.add_argument('''--seed''' ,type=lowerCAmelCase__ ,default=42 )
parser.add_argument('''--local_rank''' ,type=lowerCAmelCase__ ,default=-1 ,help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' ,action='''store_true''' ,help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' ,type=lowerCAmelCase__ ,default='''''' ,help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' ,type=lowerCAmelCase__ ,default='''''' ,help='''Can be used for distant debugging.''' )
lowerCamelCase_ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) ,redirect_output=lowerCAmelCase__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
lowerCamelCase_ = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
lowerCamelCase_ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
lowerCamelCase_ = torch.device('''cuda''' ,args.local_rank )
lowerCamelCase_ = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device ,args.n_gpu ,bool(args.local_rank != -1 ) ) )
lowerCamelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
lowerCamelCase_ = nn.parallel.DistributedDataParallel(
lowerCAmelCase__ ,device_ids=[args.local_rank] ,output_device=args.local_rank ,find_unused_parameters=lowerCAmelCase__ )
elif args.n_gpu > 1:
lowerCamelCase_ = nn.DataParallel(lowerCAmelCase__ )
# Print/save training arguments
os.makedirs(args.output_dir ,exist_ok=lowerCAmelCase__ )
torch.save(lowerCAmelCase__ ,os.path.join(args.output_dir ,'''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' ,lowerCAmelCase__ )
# Prepare dataset
lowerCamelCase_ = np.concatenate(
[
np.loadtxt(args.data_dir ,dtype=np.intaa ),
] )
lowerCamelCase_ = (torch.from_numpy(lowerCAmelCase__ ),)
lowerCamelCase_ = TensorDataset(*lowerCAmelCase__ )
lowerCamelCase_ = RandomSampler(lowerCAmelCase__ )
lowerCamelCase_ = DataLoader(lowerCAmelCase__ ,sampler=lowerCAmelCase__ ,batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
lowerCamelCase_ = mask_heads(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
prune_heads(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
if __name__ == "__main__":
main()
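# Hedged usage sketch (flag values and the script name are illustrative
# assumptions, not mandated by the code above): compute head importance for a
# pretrained GPT-2, then optionally mask and prune heads, e.g.
#
#   python run_prune_gpt.py \
#       --model_name_or_path gpt2 \
#       --data_dir ./token_ids.txt \
#       --output_dir ./pruning_output \
#       --try_masking --masking_threshold 0.9 --masking_amount 0.1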
| 29
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
lowerCamelCase : List[Any] = jnp.ones((batch_size, length) ) / length
return scores
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : Optional[Any] = None
lowerCamelCase : Optional[Any] = 20
lowerCamelCase : List[Any] = self._get_uniform_logits(batch_size=2 , length=UpperCamelCase__ )
# tweak scores to not be uniform anymore
lowerCamelCase : Optional[int] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase : List[Any] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase : List[Any] = jax.nn.softmax(UpperCamelCase__ , axis=-1 )
lowerCamelCase : str = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase : Tuple = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase : List[Any] = jax.nn.softmax(temp_dist_warper_sharper(UpperCamelCase__ , scores.copy() , cur_len=UpperCamelCase__ ) , axis=-1 )
lowerCamelCase : Optional[int] = jax.nn.softmax(temp_dist_warper_smoother(UpperCamelCase__ , scores.copy() , cur_len=UpperCamelCase__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self ) -> List[str]:
lowerCamelCase : Dict = None
lowerCamelCase : List[str] = 10
lowerCamelCase : Optional[int] = 2
# create ramp distribution
lowerCamelCase : Dict = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase : Tuple = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase : List[Any] = FlaxTopKLogitsWarper(3 )
lowerCamelCase : str = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase : Union[str, Any] = 5
lowerCamelCase : Tuple = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase : Union[str, Any] = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, length) ).copy()
lowerCamelCase : Union[str, Any] = top_k_warp_safety_check(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : Dict = None
lowerCamelCase : Tuple = 10
lowerCamelCase : int = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase : int = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCamelCase : Dict = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase : List[str] = np.exp(top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase : List[str] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase : List[str] = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase : str = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase : str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase : Optional[Any] = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : int = 20
lowerCamelCase : Optional[Any] = 4
lowerCamelCase : List[str] = 0
lowerCamelCase : List[str] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase__ )
# check that min length is applied at length 5
lowerCamelCase : str = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase : Any = 5
lowerCamelCase : Optional[int] = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Optional[Any] = min_dist_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
lowerCamelCase : Union[str, Any] = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : List[Any] = 15
lowerCamelCase : str = min_dist_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def _lowercase ( self ) -> List[str]:
lowerCamelCase : Any = 20
lowerCamelCase : List[str] = 4
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase : Any = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase : Any = 1
lowerCamelCase : Tuple = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase : str = 3
lowerCamelCase : Union[str, Any] = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Optional[int] = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def _lowercase ( self ) -> Tuple:
lowerCamelCase : Optional[int] = 20
lowerCamelCase : str = 4
lowerCamelCase : str = 0
lowerCamelCase : Any = 5
lowerCamelCase : Optional[int] = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase : List[str] = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase : Any = 4
lowerCamelCase : Any = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : List[str] = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase : Optional[Any] = 3
lowerCamelCase : Optional[Any] = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Optional[int] = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : str = 4
lowerCamelCase : List[str] = 10
lowerCamelCase : List[str] = 15
lowerCamelCase : Optional[int] = 2
lowerCamelCase : List[Any] = 1
lowerCamelCase : str = 15
# dummy input_ids and scores
lowerCamelCase : Dict = ids_tensor((batch_size, sequence_length) , UpperCamelCase__ )
lowerCamelCase : Tuple = input_ids.copy()
lowerCamelCase : List[Any] = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Optional[Any] = scores.copy()
# instantiate all dist processors
lowerCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase : str = FlaxTopKLogitsWarper(3 )
lowerCamelCase : Tuple = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase : List[str] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase__ )
lowerCamelCase : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
lowerCamelCase : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
lowerCamelCase : Optional[int] = 10
# no processor list
lowerCamelCase : Dict = temp_dist_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Tuple = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : List[str] = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = min_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Optional[int] = bos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = eos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# with processor list
lowerCamelCase : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase : Union[str, Any] = processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self ) -> Any:
lowerCamelCase : List[Any] = 4
lowerCamelCase : Optional[int] = 10
lowerCamelCase : Dict = 15
lowerCamelCase : Optional[int] = 2
lowerCamelCase : Dict = 1
lowerCamelCase : Optional[Any] = 15
# dummy input_ids and scores
lowerCamelCase : List[Any] = ids_tensor((batch_size, sequence_length) , UpperCamelCase__ )
lowerCamelCase : Any = input_ids.copy()
lowerCamelCase : Dict = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Dict = scores.copy()
# instantiate all dist processors
lowerCamelCase : int = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase : List[Any] = FlaxTopKLogitsWarper(3 )
lowerCamelCase : List[str] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase : int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase__ )
lowerCamelCase : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
lowerCamelCase : Dict = 10
# no processor list
def run_no_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Dict = temp_dist_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Tuple = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Optional[int] = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : int = min_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = bos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Dict = eos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
return scores
# with processor list
def run_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase : Tuple = processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
return scores
lowerCamelCase : Dict = jax.jit(UpperCamelCase__ )
lowerCamelCase : Optional[int] = jax.jit(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = jitted_run_no_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Optional[Any] = jitted_run_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
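# Standalone sketch (not part of the test class above): the same warpers can
# be chained outside the tests. Shapes and values below are illustrative
# assumptions, not fixtures from this suite.
if is_flax_available():
    _logits = jnp.ones((2, 20)) / 20.0          # uniform dummy logits
    _ids = jnp.zeros((2, 5), dtype=jnp.int32)   # dummy input ids
    _chain = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=3)]
    )
    _warped = _chain(_ids, _logits, cur_len=5)  # all but 3 logits per row -> -inf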
| 311
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class UpperCamelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
__a : List[str] = """bloom"""
__a : Union[str, Any] = ["""past_key_values"""]
__a : List[Any] = {
"""num_hidden_layers""": """n_layer""",
"""num_attention_heads""": """n_head""",
}
def __init__( self, snake_case__=25_08_80, snake_case__=64, snake_case__=2, snake_case__=8, snake_case__=1E-5, snake_case__=0.02, snake_case__=True, snake_case__=1, snake_case__=2, snake_case__=False, snake_case__=0.0, snake_case__=0.0, snake_case__=1, snake_case__=False, **snake_case__, ) -> Optional[int]:
"""simple docstring"""
lowercase_ : str = vocab_size
# Backward compatibility with n_embed kwarg
lowercase_ : Dict = kwargs.pop("""n_embed""", snake_case__ )
lowercase_ : str = hidden_size if n_embed is None else n_embed
lowercase_ : int = n_layer
lowercase_ : Optional[int] = n_head
lowercase_ : Optional[Any] = layer_norm_epsilon
lowercase_ : int = initializer_range
lowercase_ : str = use_cache
lowercase_ : Any = pretraining_tp
lowercase_ : Dict = apply_residual_connection_post_layernorm
lowercase_ : List[Any] = hidden_dropout
lowercase_ : Optional[int] = attention_dropout
lowercase_ : Optional[Any] = bos_token_id
lowercase_ : int = eos_token_id
lowercase_ : str = slow_but_exact
super().__init__(bos_token_id=snake_case__, eos_token_id=snake_case__, **snake_case__ )
class UpperCamelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
__a : Optional[Any] = version.parse("""1.12""" )
def __init__( self, snake_case__, snake_case__ = "default", snake_case__ = None, snake_case__ = False, ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(snake_case__, task=snake_case__, patching_specs=snake_case__, use_past=snake_case__ )
if not getattr(self._config, """pad_token_id""", snake_case__ ):
# TODO: how to do that better?
lowercase_ : Union[str, Any] = 0
@property
def snake_case__ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase_ : Optional[int] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(snake_case__, direction="""inputs""", inverted_values_shape=snake_case__ )
lowercase_ : int = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowercase_ : str = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def snake_case__ ( self ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def snake_case__ ( self ) -> int:
"""simple docstring"""
return self._config.n_head
@property
def snake_case__ ( self ) -> float:
"""simple docstring"""
return 1E-3
def snake_case__ ( self, snake_case__, snake_case__ = -1, snake_case__ = -1, snake_case__ = False, snake_case__ = None, ) -> Mapping[str, Any]:
"""simple docstring"""
lowercase_ : Optional[Any] = super(snake_case__, self ).generate_dummy_inputs(
snake_case__, batch_size=snake_case__, seq_length=snake_case__, is_pair=snake_case__, framework=snake_case__ )
        # We need to order the inputs in the way they appear in the forward()
lowercase_ : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_key_values
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowercase_ , lowercase_ : Dict = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowercase_ : Optional[int] = seqlen + 2
lowercase_ : Union[str, Any] = self._config.hidden_size // self.num_attention_heads
lowercase_ : int = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
lowercase_ : Optional[int] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
lowercase_ : int = [
(torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers )
]
lowercase_ : Tuple = common_inputs["""attention_mask"""]
if self.use_past:
lowercase_ : int = ordered_inputs["""attention_mask"""].dtype
lowercase_ : Union[str, Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(snake_case__, snake_case__, dtype=snake_case__ )], dim=1 )
return ordered_inputs
@property
def snake_case__ ( self ) -> int:
"""simple docstring"""
return 13
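# Illustrative sketch of the past_key_values layout generated above (numbers
# are arbitrary assumptions for demonstration, not config defaults):
#   batch, n_head, hidden_size, seqlen = 2, 4, 64, 8
#   head_dim = hidden_size // n_head                        # 16
#   past_length = seqlen + 2                                # 10
#   key_shape = (batch * n_head, head_dim, past_length)     # (8, 16, 10)
#   value_shape = (batch * n_head, past_length, head_dim)   # (8, 10, 16)
# BLOOM keys put head_dim before the time axis, which is why the values are
# "inverted" and why fill_with_past_key_values_ uses dynamic axis 2 above.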
| 436
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __magic_name__ ( lowercase=None ) -> Dict:
"""simple docstring"""
if subparsers is not None:
lowercase_ : List[str] = subparsers.add_parser("""test""" )
else:
lowercase_ : Optional[Any] = argparse.ArgumentParser("""Accelerate test command""" )
parser.add_argument(
"""--config_file""" , default=lowercase , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=lowercase )
return parser
def __magic_name__ ( lowercase ) -> str:
"""simple docstring"""
lowercase_ : Union[str, Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
if args.config_file is None:
lowercase_ : List[Any] = script_name
else:
lowercase_ : Dict = f"""--config_file={args.config_file} {script_name}"""
lowercase_ : List[str] = ["""accelerate-launch"""] + test_args.split()
lowercase_ : int = execute_subprocess_async(lowercase , env=os.environ.copy() )
if result.returncode == 0:
print("""Test is a success! You are ready for your distributed training!""" )
def __magic_name__ ( ) -> List[Any]:
"""simple docstring"""
lowercase_ : List[Any] = test_command_parser()
lowercase_ : Any = parser.parse_args()
test_command(lowercase )
if __name__ == "__main__":
main()
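# Hedged usage sketch: this parser is normally reached through the
# `accelerate` CLI entry point rather than run directly, e.g.
#
#   accelerate test
#   accelerate test --config_file path/to/default_config.yaml
#
# (the config path is a placeholder; with no flag, the cached config is used).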
| 436
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : int = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class UpperCamelCase_ ( __snake_case ):
'''simple docstring'''
UpperCamelCase : Any = 'nllb-moe'
UpperCamelCase : Optional[int] = ['past_key_values']
UpperCamelCase : Dict = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self :Dict , lowerCAmelCase__ :Union[str, Any]=128112 , lowerCAmelCase__ :Union[str, Any]=1024 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Dict=4096 , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Union[str, Any]=4096 , lowerCAmelCase__ :Optional[Any]=16 , lowerCAmelCase__ :Union[str, Any]=0.05 , lowerCAmelCase__ :List[str]=0.05 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :List[str]="relu" , lowerCAmelCase__ :Optional[int]=1024 , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :List[Any]=0.0 , lowerCAmelCase__ :str=0.02 , lowerCAmelCase__ :Optional[Any]=2 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Any=False , lowerCAmelCase__ :Dict="float32" , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Tuple=128 , lowerCAmelCase__ :str=64 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :List[Any]=0.0_01 , lowerCAmelCase__ :Dict=0.0_01 , lowerCAmelCase__ :Union[str, Any]="all" , lowerCAmelCase__ :Dict=False , lowerCAmelCase__ :Union[str, Any]=False , lowerCAmelCase__ :str=1.0 , lowerCAmelCase__ :Tuple=0.2 , lowerCAmelCase__ :List[Any]=1 , lowerCAmelCase__ :Dict=0 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :Dict=False , **lowerCAmelCase__ :str , ) ->Optional[Any]:
lowercase = vocab_size
lowercase = max_position_embeddings
lowercase = d_model
lowercase = encoder_ffn_dim
lowercase = encoder_layers
lowercase = encoder_attention_heads
lowercase = decoder_ffn_dim
lowercase = decoder_layers
lowercase = decoder_attention_heads
lowercase = dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = activation_function
lowercase = init_std
lowercase = encoder_layerdrop
lowercase = decoder_layerdrop
lowercase = use_cache
lowercase = encoder_layers
lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase = router_z_loss_coef
lowercase = router_aux_loss_coef
lowercase = decoder_sparse_step
lowercase = encoder_sparse_step
lowercase = num_experts
lowercase = expert_capacity
lowercase = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
lowercase = router_dtype
lowercase = router_ignore_padding_tokens
lowercase = batch_prioritized_routing
lowercase = second_expert_policy
lowercase = normalize_router_prob_before_dropping
lowercase = moe_eval_capacity_token_fraction
lowercase = moe_token_dropout
lowercase = output_router_logits
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
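# Illustrative note: `router_dtype` is validated eagerly above, so an
# unsupported value fails at construction time. A hypothetical example
# (class name as obfuscated in this file):
#   UpperCamelCase_(router_dtype="float32")  # ok
#   UpperCamelCase_(router_dtype="int8")     # raises ValueError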
| 441
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __a :
pass
| 552
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a_ ( unittest.TestCase ):
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
snake_case : List[Any] = 1
snake_case : Optional[int] = 3
snake_case : Tuple = (32, 32)
snake_case : List[str] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase__ )
return image
@property
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
snake_case : Tuple = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=UpperCAmelCase__ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
snake_case : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def lowerCAmelCase( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
snake_case : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
snake_case : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case : int = self.dummy_cond_unet_upscale
snake_case : List[Any] = DDPMScheduler()
snake_case : str = DDIMScheduler(prediction_type='''v_prediction''' )
snake_case : Any = self.dummy_vae
snake_case : Union[str, Any] = self.dummy_text_encoder
snake_case : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case : Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        snake_case : Optional[Any] = Image.fromarray(np.uint8(UpperCAmelCase__ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
snake_case : Optional[int] = StableDiffusionUpscalePipeline(
unet=UpperCAmelCase__ , low_res_scheduler=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , max_noise_level=350 , )
snake_case : Any = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
snake_case : List[str] = '''A painting of a squirrel eating a burger'''
snake_case : Tuple = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
snake_case : List[str] = sd_pipe(
[prompt] , image=UpperCAmelCase__ , generator=UpperCAmelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
snake_case : List[str] = output.images
snake_case : Tuple = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
snake_case : Any = sd_pipe(
[prompt] , image=UpperCAmelCase__ , generator=UpperCAmelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=UpperCAmelCase__ , )[0]
snake_case : str = image[0, -3:, -3:, -1]
snake_case : Tuple = image_from_tuple[0, -3:, -3:, -1]
snake_case : List[Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
snake_case : Optional[Any] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase( self : Any ):
"""simple docstring"""
snake_case : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case : Optional[int] = self.dummy_cond_unet_upscale
snake_case : str = DDPMScheduler()
snake_case : int = DDIMScheduler(prediction_type='''v_prediction''' )
snake_case : List[Any] = self.dummy_vae
snake_case : Optional[Any] = self.dummy_text_encoder
snake_case : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        snake_case : int = Image.fromarray(np.uint8(UpperCAmelCase__ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
snake_case : Dict = StableDiffusionUpscalePipeline(
unet=UpperCAmelCase__ , low_res_scheduler=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , max_noise_level=350 , )
snake_case : List[str] = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
snake_case : str = '''A painting of a squirrel eating a burger'''
snake_case : Any = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
snake_case : Dict = output.images
assert image.shape[0] == 2
snake_case : Optional[Any] = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
snake_case : Optional[int] = sd_pipe(
[prompt] , image=UpperCAmelCase__ , generator=UpperCAmelCase__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
snake_case : Optional[Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
snake_case : Any = self.dummy_cond_unet_upscale
snake_case : int = DDPMScheduler()
snake_case : List[Any] = DDIMScheduler(prediction_type='''v_prediction''' )
snake_case : Tuple = self.dummy_vae
snake_case : List[Any] = self.dummy_text_encoder
snake_case : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case : int = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        snake_case : List[str] = Image.fromarray(np.uint8(UpperCAmelCase__ ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
snake_case : List[str] = unet.half()
snake_case : Optional[int] = text_encoder.half()
# make sure here that pndm scheduler skips prk
snake_case : Union[str, Any] = StableDiffusionUpscalePipeline(
unet=UpperCAmelCase__ , low_res_scheduler=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , max_noise_level=350 , )
snake_case : Tuple = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
snake_case : Union[str, Any] = '''A painting of a squirrel eating a burger'''
snake_case : int = torch.manual_seed(0 )
snake_case : Any = sd_pipe(
[prompt] , image=UpperCAmelCase__ , generator=UpperCAmelCase__ , num_inference_steps=2 , output_type='''np''' , ).images
snake_case : Tuple = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
snake_case : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
snake_case : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
snake_case : List[Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
snake_case : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
snake_case : Any = '''a cat sitting on a park bench'''
snake_case : Tuple = torch.manual_seed(0 )
snake_case : Any = pipe(
prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type='''np''' , )
snake_case : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def lowerCAmelCase( self : Any ):
"""simple docstring"""
snake_case : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
snake_case : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
snake_case : Any = '''stabilityai/stable-diffusion-x4-upscaler'''
snake_case : int = StableDiffusionUpscalePipeline.from_pretrained(
            UpperCAmelCase__ , torch_dtype=torch.float16 , )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
snake_case : Tuple = '''a cat sitting on a park bench'''
snake_case : int = torch.manual_seed(0 )
snake_case : List[Any] = pipe(
prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type='''np''' , )
snake_case : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def lowerCAmelCase( self : Any ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
snake_case : Dict = '''stabilityai/stable-diffusion-x4-upscaler'''
snake_case : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(
            UpperCAmelCase__ , torch_dtype=torch.float16 , )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case : List[str] = '''a cat sitting on a park bench'''
snake_case : Optional[Any] = torch.manual_seed(0 )
snake_case : List[str] = pipe(
prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , generator=UpperCAmelCase__ , num_inference_steps=5 , output_type='''np''' , )
snake_case : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
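# Note (illustrative, not asserted elsewhere): attention slicing plus
# sequential CPU offload trades throughput for memory, which is what keeps
# the peak-VRAM check above under ~2.9 GB on CUDA devices.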
| 84
|
import re
def a_ ( __magic_name__ ) -> bool:
"""simple docstring"""
snake_case : List[str] = re.compile(
R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
return bool(re.search(__magic_name__ , __magic_name__ ) )
if __name__ == "__main__":
_a : Any = '0094702343221'
print(is_sri_lankan_phone_number(phone))
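    # Additional illustrative checks derived from the regex above: an optional
    # prefix 0 / 94 / +94 / 0094, a mobile code 7x (x in 0,1,2,4,5,6,7,8), an
    # optional "-" or " " separator, then exactly 7 digits.
    assert is_sri_lankan_phone_number("+94773283048")
    assert not is_sri_lankan_phone_number("0691234567")  # 69 is not a mobile prefix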
| 84
| 1
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase = logging.get_logger(__name__)
class _a ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Tuple = ["""pixel_values"""]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = 32 , __UpperCAmelCase=PILImageResampling.BILINEAR , __UpperCAmelCase = True , **__UpperCAmelCase , ):
__A : Optional[int] = do_resize
__A : List[Any] = do_rescale
__A : Optional[Any] = size_divisor
__A : List[Any] = resample
super().__init__(**__UpperCAmelCase )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ):
__A , __A : Dict = get_image_size(__UpperCAmelCase )
# Rounds the height and width down to the closest multiple of size_divisor
__A : Dict = height // size_divisor * size_divisor
__A : Any = width // size_divisor * size_divisor
__A : Tuple = resize(__UpperCAmelCase , (new_h, new_w) , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
return image
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ):
return rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
__A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
__A : Dict = do_rescale if do_rescale is not None else self.do_rescale
__A : Dict = size_divisor if size_divisor is not None else self.size_divisor
__A : Any = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("size_divisor is required for resizing" )
__A : str = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError("Invalid image(s)" )
# All transformations expect numpy arrays.
__A : Tuple = [to_numpy_array(__UpperCAmelCase ) for img in images]
if do_resize:
__A : Dict = [self.resize(__UpperCAmelCase , size_divisor=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_rescale:
__A : Optional[int] = [self.rescale(__UpperCAmelCase , scale=1 / 255 ) for image in images]
__A : Dict = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
__A : Dict = {"pixel_values": images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
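# Usage sketch (hedged: the method names above are corpus placeholders for a
# GLPN-style `preprocess`): with size_divisor=32, a (3, 100, 130) image is
# resized to (3, 96, 128) -- height and width floored down to multiples of
# 32 -- then rescaled by 1/255 and batched under the "pixel_values" key.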
| 520
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class _a ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = """bit"""
lowerCamelCase_ : Dict = ["""preactivation""", """bottleneck"""]
lowerCamelCase_ : Dict = ["""SAME""", """VALID"""]
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=64 , __UpperCAmelCase=[256, 512, 1_024, 2_048] , __UpperCAmelCase=[3, 4, 6, 3] , __UpperCAmelCase="preactivation" , __UpperCAmelCase="relu" , __UpperCAmelCase=None , __UpperCAmelCase=32 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
if layer_type not in self.layer_types:
raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
__A : Optional[int] = global_padding.upper()
else:
raise ValueError(F"Padding strategy {global_padding} not supported" )
__A : Any = num_channels
__A : int = embedding_size
__A : Optional[Any] = hidden_sizes
__A : Dict = depths
__A : Dict = layer_type
__A : int = hidden_act
__A : Any = global_padding
__A : Optional[Any] = num_groups
__A : Any = drop_path_rate
__A : Tuple = embedding_dynamic_padding
__A : Dict = output_stride
__A : Tuple = width_factor
__A : Any = ["stem"] + [F"stage{idx}" for idx in range(1 , len(__UpperCAmelCase ) + 1 )]
__A , __A : Any = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
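# Illustrative note: with the defaults above, `depths=[3, 4, 6, 3]` yields
# stage names ["stem", "stage1", "stage2", "stage3", "stage4"], and the
# backbone helper aligns `out_features`/`out_indices` against that list
# (falling back to the last stage when both are None).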
| 520
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
_lowercase = StableDiffusionInstructPixaPixPipeline
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
_lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowercase = IMAGE_TO_IMAGE_IMAGE_PARAMS
_lowercase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Tuple =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE_ : Optional[Any] =PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
SCREAMING_SNAKE_CASE_ : List[Any] =CLIPTextModel(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE_ : Tuple ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
SCREAMING_SNAKE_CASE_ : str =floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE_ : List[str] =Image.fromarray(np.uint8(__UpperCAmelCase ) ).convert('RGB' )
if str(__UpperCAmelCase ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ : Dict =torch.manual_seed(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : str =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] ={
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] =StableDiffusionInstructPixaPixPipeline(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] =self.get_dummy_inputs(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =sd_pipe(**__UpperCAmelCase ).images
SCREAMING_SNAKE_CASE_ : List[str] =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ : Dict =np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : List[Any] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : int =StableDiffusionInstructPixaPixPipeline(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : str =self.get_dummy_inputs(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] ='french fries'
SCREAMING_SNAKE_CASE_ : Any =sd_pipe(**__UpperCAmelCase , negative_prompt=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple =output.images
SCREAMING_SNAKE_CASE_ : str =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ : Union[str, Any] =np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : List[str] ='cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : int =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Optional[int] =StableDiffusionInstructPixaPixPipeline(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] =self.get_dummy_inputs(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] =[inputs['prompt']] * 2
        SCREAMING_SNAKE_CASE_ : Optional[Any] =np.array(inputs['image'] ).astype(np.float32 ) / 255.0
SCREAMING_SNAKE_CASE_ : Optional[Any] =torch.from_numpy(__UpperCAmelCase ).unsqueeze(0 ).to(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple =image / 2 + 0.5
SCREAMING_SNAKE_CASE_ : Any =image.permute(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE_ : Dict =image.repeat(2 , 1 , 1 , 1 )
SCREAMING_SNAKE_CASE_ : Dict =sd_pipe(**__UpperCAmelCase ).images
SCREAMING_SNAKE_CASE_ : Union[str, Any] =image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
SCREAMING_SNAKE_CASE_ : List[Any] =np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Any ='cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : int =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Tuple =EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =StableDiffusionInstructPixaPixPipeline(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] =self.get_dummy_inputs(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int =sd_pipe(**__UpperCAmelCase ).images
SCREAMING_SNAKE_CASE_ : Optional[Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] =[round(__UpperCAmelCase , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(__UpperCAmelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ : List[Any] =np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[Any] =StableDiffusionInstructPixaPixPipeline(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =VaeImageProcessor(do_resize=__UpperCAmelCase , do_normalize=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] =pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : str =pipe(**self.get_dummy_inputs_by_type(__UpperCAmelCase , input_image_type='pt' ) )[0]
SCREAMING_SNAKE_CASE_ : int =components['vae']
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.get_dummy_inputs_by_type(__UpperCAmelCase , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
SCREAMING_SNAKE_CASE_ : int =vae.encode(inputs[image_param] ).latent_dist.mode()
SCREAMING_SNAKE_CASE_ : int =pipe(**__UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] =np.abs(out - out_latents_inputs ).max()
self.assertLess(__UpperCAmelCase , 1E-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self , __UpperCAmelCase=0 ):
SCREAMING_SNAKE_CASE_ : Tuple =torch.manual_seed(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
SCREAMING_SNAKE_CASE_ : Any ={
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : str =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ : List[str] =self.get_inputs()
SCREAMING_SNAKE_CASE_ : Tuple =pipe(**__UpperCAmelCase ).images
SCREAMING_SNAKE_CASE_ : Dict =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Optional[int] =np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.get_inputs()
SCREAMING_SNAKE_CASE_ : Dict =pipe(**__UpperCAmelCase ).images
SCREAMING_SNAKE_CASE_ : str =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : int =np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] =DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ : str =self.get_inputs()
SCREAMING_SNAKE_CASE_ : Any =pipe(**__UpperCAmelCase ).images
SCREAMING_SNAKE_CASE_ : Optional[Any] =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : int =np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] =0
def callback_fn(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> None:
SCREAMING_SNAKE_CASE_ : int =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
SCREAMING_SNAKE_CASE_ : Optional[Any] =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE_ : int =latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Optional[Any] =np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
SCREAMING_SNAKE_CASE_ : int =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE_ : str =latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : int =np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
SCREAMING_SNAKE_CASE_ : List[str] =False
SCREAMING_SNAKE_CASE_ : Tuple =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix' , safety_checker=__UpperCAmelCase , torch_dtype=torch.float16 )
SCREAMING_SNAKE_CASE_ : List[str] =pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ : Dict =self.get_inputs()
pipe(**__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __lowerCamelCase ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE_ : str =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix' , safety_checker=__UpperCAmelCase , torch_dtype=torch.float16 )
SCREAMING_SNAKE_CASE_ : Optional[Any] =pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ : int =self.get_inputs()
SCREAMING_SNAKE_CASE_ : Dict =pipe(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Dict =self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE_ : Optional[Any] =inputs['image'].resize((504, 504) )
SCREAMING_SNAKE_CASE_ : Dict ='timbrooks/instruct-pix2pix'
SCREAMING_SNAKE_CASE_ : Tuple =StableDiffusionInstructPixaPixPipeline.from_pretrained(
__UpperCAmelCase , safety_checker=__UpperCAmelCase , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ : Optional[int] =pipe(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =output.images[0]
SCREAMING_SNAKE_CASE_ : List[str] =image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
SCREAMING_SNAKE_CASE_ : str =np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """simple docstring"""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """simple docstring"""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
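    # An illustration of what the failure array encodes (added for clarity):
    # failure[k] is the length of the longest proper prefix of pattern[: k + 1]
    # that is also a suffix of it, e.g. "aabaab" has the border "aab" of length 3.
    for k, border_length in enumerate(get_failure_array("aabaabaaa")):
        print(f"{'aabaabaaa'[: k + 1]!r} -> longest border length {border_length}")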
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
'''simple docstring'''
    def __init__(self, components: Collection[float] | None = None) -> None:
        """simple docstring"""
        if components is None:
            components = []
        self.__components = list(components)
def __len__( self : Any ) -> int:
"""simple docstring"""
return len(self.__components )
def __str__( self : Tuple ) -> str:
"""simple docstring"""
return "(" + ",".join(map(__a , self.__components ) ) + ")"
def __add__( self : Tuple , __a : Vector ) -> Vector:
"""simple docstring"""
__lowercase : Any = len(self )
if size == len(__a ):
__lowercase : Optional[int] = [self.__components[i] + other.component(__a ) for i in range(__a )]
return Vector(__a )
else:
raise Exception("""must have the same size""" )
def __sub__( self : Any , __a : Vector ) -> Vector:
"""simple docstring"""
__lowercase : Union[str, Any] = len(self )
if size == len(__a ):
__lowercase : List[str] = [self.__components[i] - other.component(__a ) for i in range(__a )]
return Vector(__a )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self : Optional[int] , __a : float ) -> Vector:
"""simple docstring"""
...
@overload
def __mul__( self : Optional[int] , __a : Vector ) -> float:
"""simple docstring"""
...
def __mul__( self : Dict , __a : float | Vector ) -> float | Vector:
"""simple docstring"""
if isinstance(__a , (float, int) ):
__lowercase : int = [c * other for c in self.__components]
return Vector(__a )
elif isinstance(__a , __a ) and len(self ) == len(__a ):
__lowercase : Optional[int] = len(self )
__lowercase : Optional[int] = [self.__components[i] * other.component(__a ) for i in range(__a )]
return sum(__a )
else: # error case
raise Exception("""invalid operand!""" )
def lowerCAmelCase ( self : Dict ) -> Vector:
"""simple docstring"""
return Vector(self.__components )
def lowerCAmelCase ( self : List[Any] , __a : int ) -> float:
"""simple docstring"""
if isinstance(__a , __a ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def lowerCAmelCase ( self : List[Any] , __a : int , __a : float ) -> None:
"""simple docstring"""
assert -len(self.__components ) <= pos < len(self.__components )
__lowercase : Union[str, Any] = value
def lowerCAmelCase ( self : str ) -> float:
"""simple docstring"""
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
__lowercase : List[str] = [c**2 for c in self.__components]
return math.sqrt(sum(__a ) )
def lowerCAmelCase ( self : str , __a : Vector , __a : bool = False ) -> float:
"""simple docstring"""
__lowercase : List[str] = self * other
__lowercase : int = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def snake_case_ ( lowerCAmelCase_ : int ):
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
return Vector([0] * dimension )
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (isinstance(lowerCAmelCase_ , lowerCAmelCase_ ))
__lowercase : Optional[int] = [0] * dimension
__lowercase : Dict = 1
return Vector(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : float , lowerCAmelCase_ : Vector , lowerCAmelCase_ : Vector ):
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (isinstance(lowerCAmelCase_ , (int, float) ))
)
return x * scalar + y
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
random.seed(lowerCAmelCase_ )
__lowercase : Union[str, Any] = [random.randint(lowerCAmelCase_ , lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ )]
return Vector(lowerCAmelCase_ )
class Matrix:
'''simple docstring'''
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        """simple docstring"""
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self : Any ) -> str:
"""simple docstring"""
__lowercase : Any = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : int , __a : Matrix ) -> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
__lowercase : Tuple = []
for i in range(self.__height ):
__lowercase : str = [
self.__matrix[i][j] + other.component(__a , __a )
for j in range(self.__width )
]
matrix.append(__a )
return Matrix(__a , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self : List[Any] , __a : Matrix ) -> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
__lowercase : List[str] = []
for i in range(self.__height ):
__lowercase : Union[str, Any] = [
self.__matrix[i][j] - other.component(__a , __a )
for j in range(self.__width )
]
matrix.append(__a )
return Matrix(__a , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self : Dict , __a : float ) -> Matrix:
"""simple docstring"""
...
@overload
def __mul__( self : Tuple , __a : Vector ) -> Vector:
"""simple docstring"""
...
def __mul__( self : int , __a : float | Vector ) -> Vector | Matrix:
"""simple docstring"""
if isinstance(__a , __a ): # matrix-vector
if len(__a ) == self.__width:
__lowercase : List[Any] = zero_vector(self.__height )
for i in range(self.__height ):
__lowercase : Optional[Any] = [
self.__matrix[i][j] * other.component(__a )
for j in range(self.__width )
]
ans.change_component(__a , sum(__a ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(__a , (int, float) ): # matrix-scalar
__lowercase : Optional[Any] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(__a , self.__width , self.__height )
return None
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
return self.__height
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
return self.__width
def lowerCAmelCase ( self : int , __a : int , __a : int ) -> float:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def lowerCAmelCase ( self : List[str] , __a : int , __a : int , __a : float ) -> None:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
__lowercase : List[str] = value
else:
raise Exception("""change_component: indices out of bounds""" )
def lowerCAmelCase ( self : int , __a : int , __a : int ) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
__lowercase : Union[str, Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(__a ) ):
__lowercase : str = minor[i][:y] + minor[i][y + 1 :]
return Matrix(__a , self.__width - 1 , self.__height - 1 ).determinant()
def lowerCAmelCase ( self : Optional[Any] , __a : int , __a : int ) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(__a , __a )
else:
raise Exception("""Indices out of bounds""" )
def lowerCAmelCase ( self : Dict ) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__lowercase : Tuple = [
self.__matrix[0][y] * self.cofactor(0 , __a ) for y in range(self.__width )
]
return sum(__a )
def snake_case_ ( lowerCAmelCase_ : int ):
__lowercase : list[list[float]] = [[0] * n for _ in range(lowerCAmelCase_ )]
return Matrix(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
random.seed(lowerCAmelCase_ )
__lowercase : list[list[float]] = [
[random.randint(lowerCAmelCase_ , lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ )] for _ in range(lowerCAmelCase_ )
]
return Matrix(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
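# A short usage sketch of the API above (illustrative only: it assumes the remaining
# obfuscated assignments inside the method bodies are restored to the names their
# reads use, e.g. `size`, `result`, `ans`). The function is defined but not executed.
def demo_linear_algebra():
    v = Vector([1.0, 2.0, 2.0])
    w = Vector([1.0, 0.0, 0.0])
    print(v + w)                 # (2.0,2.0,2.0)
    print(v * 2.0)               # scalar product -> (2.0,4.0,4.0)
    print(v * w)                 # dot product -> 1.0
    print(v.euclidean_length())  # sqrt(1 + 4 + 4) = 3.0
    m = Matrix([[1.0, 0.0], [0.0, 2.0]], 2, 2)
    print(m.determinant())       # 1.0 * 2.0 - 0.0 * 0.0 = 2.0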
from __future__ import annotations
class Node:
    '''simple docstring'''

    def __init__(self, data=None) -> None:
        """simple docstring"""
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        """simple docstring"""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main() -> None:
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
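# `print_reverse` recurses once per node, so very long lists can exceed Python's
# recursion limit. An iterative alternative, added here as an illustrative sketch:
def print_reverse_iterative(head_node: Node) -> None:
    values = []
    while head_node is not None:
        values.append(head_node.data)
        head_node = head_node.next
    for value in reversed(values):
        print(value)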
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
a__ : Optional[int] = 4
a__ : Union[str, Any] = 3
class FailedTestError(RuntimeError):
    pass
def _UpperCamelCase ( __A ) -> Any:
'''simple docstring'''
for shard in shards:
for i in range(__UpperCamelCase ):
yield {"i": i, "shard": shard}
def main( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = int(os.environ["RANK"] )
UpperCamelCase__ = int(os.environ["WORLD_SIZE"] )
UpperCamelCase__ = ArgumentParser()
parser.add_argument("--streaming" , type=__UpperCamelCase )
parser.add_argument("--local_rank" , type=__UpperCamelCase )
parser.add_argument("--num_workers" , type=__UpperCamelCase , default=0 )
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = args.streaming
UpperCamelCase__ = args.num_workers
UpperCamelCase__ = {"""shards""": [F'''shard_{shard_idx}''' for shard_idx in range(__UpperCamelCase )]}
UpperCamelCase__ = IterableDataset.from_generator(__UpperCamelCase , gen_kwargs=__UpperCamelCase )
if not streaming:
UpperCamelCase__ = Dataset.from_list(list(__UpperCamelCase ) )
UpperCamelCase__ = split_dataset_by_node(__UpperCamelCase , rank=__UpperCamelCase , world_size=__UpperCamelCase )
UpperCamelCase__ = torch.utils.data.DataLoader(__UpperCamelCase , num_workers=__UpperCamelCase )
UpperCamelCase__ = NUM_SHARDS * NUM_ITEMS_PER_SHARD
UpperCamelCase__ = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
UpperCamelCase__ = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
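# A self-contained sketch of the splitting behaviour this script asserts, runnable
# without a distributed launcher (same `datasets` APIs as imported above; the
# constant names follow the reads in `main`):
def demo_split_by_node():
    full = Dataset.from_dict({"i": list(range(NUM_SHARDS * NUM_ITEMS_PER_SHARD))})
    for rank in range(3):
        part = split_dataset_by_node(full, rank=rank, world_size=3)
        print(rank, len(part))  # 12 rows over 3 workers -> 4 rows each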
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a__ : List[Any] = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    '''simple docstring'''

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
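# A quick worked check of the sizing rule above (a sketch; assumes a channels-last
# numpy array, with values chosen for illustration): the height scale 384/480 = 0.8
# deviates less from 1 than the width scale 384/640 = 0.6, so 0.8 is applied to both
# sides, giving round(384 / 32) * 32 = 384 and round(512 / 32) * 32 = 512.
_demo_image = np.zeros((480, 640, 3), dtype=np.uint8)
assert get_resize_output_image_size(_demo_image, (384, 384), True, 32) == (384, 512)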
class DPTImageProcessor( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
def __init__( self , a = True , a = None , a = PILImageResampling.BILINEAR , a = False , a = 1 , a = True , a = 1 / 2_55 , a = True , a = None , a = None , **a , ):
super().__init__(**a )
UpperCamelCase__ = size if size is not None else {"height": 3_84, "width": 3_84}
UpperCamelCase__ = get_size_dict(a )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = keep_aspect_ratio
UpperCamelCase__ = ensure_multiple_of
UpperCamelCase__ = resample
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , a , a , a = False , a = 1 , a = PILImageResampling.BICUBIC , a = None , **a , ):
UpperCamelCase__ = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
UpperCamelCase__ = get_resize_output_image_size(
a , output_size=(size["height"], size["width"]) , keep_aspect_ratio=a , multiple=a , )
return resize(a , size=a , resample=a , data_format=a , **a )
    def rescale( self , a , a , a = None , **a , ):
return rescale(a , scale=a , data_format=a , **a )
    def normalize( self , a , a , a , a = None , **a , ):
return normalize(a , mean=a , std=a , data_format=a , **a )
    def preprocess( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ):
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(a )
UpperCamelCase__ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
UpperCamelCase__ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(a ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=a , mean=a , std=a ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(a , a ) for image in images]
UpperCamelCase__ = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
    def post_process_semantic_segmentation( self , a , a = None ):
UpperCamelCase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(a ) != len(a ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(a ):
UpperCamelCase__ = target_sizes.numpy()
UpperCamelCase__ = []
for idx in range(len(a ) ):
UpperCamelCase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=a )
UpperCamelCase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(a )
else:
UpperCamelCase__ = logits.argmax(dim=1 )
UpperCamelCase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
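# A usage sketch for the processor defined above (assuming it is the DPT image
# processor published as `transformers.DPTImageProcessor`; the keyword names mirror
# the __init__ arguments in this file):
def demo_preprocess(pil_image):
    from transformers import DPTImageProcessor

    processor = DPTImageProcessor(
        size={"height": 384, "width": 384}, keep_aspect_ratio=True, ensure_multiple_of=32
    )
    batch = processor(images=pil_image, return_tensors="pt")
    return batch["pixel_values"]  # shape (1, 3, H, W) with H and W multiples of 32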
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys( flax_key_tuple , flax_tensor ):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
lowerCamelCase_ : Optional[int] = flax_key_tuple[:-1] + ("""weight""",)
lowerCamelCase_ : Dict = torch.permute(a__ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(a__ ):
# linear layer
lowerCamelCase_ : Any = flax_key_tuple[:-1] + ("""weight""",)
lowerCamelCase_ : Union[str, Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCamelCase_ : Dict = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict( layer , checkpoint_info , switch_checkpoint_path ):
if "metadata" in layer:
lowerCamelCase_ : List[Any] = layer.split("""metadata""" )
lowerCamelCase_ : Dict = """""".join(split_layer[0] )[:-1]
lowerCamelCase_ : Union[str, Any] = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
lowerCamelCase_ : Optional[int] = layer.split("""kvstore""" )
lowerCamelCase_ : Tuple = """""".join(split_layer[0] )[:-1]
lowerCamelCase_ : Tuple = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
lowerCamelCase_ : Optional[Any] = layer.split("""/""" )
lowerCamelCase_ : List[Any] = """/""".join(split_layer[:-1] )
lowerCamelCase_ : int = (split_layer[-1],)
if "kvstore/path" in layer:
lowerCamelCase_ : Any = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
lowerCamelCase_ : Dict = """file"""
else:
lowerCamelCase_ : Optional[Any] = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def rename_and_save_block( current_block , save_path ):
lowerCamelCase_ : Union[str, Any] = rename_keys(a__ )
lowerCamelCase_ : Any = {}
for k, v in current_block.items():
lowerCamelCase_ : Optional[int] = v
lowerCamelCase_ : str = new_current_block
torch.save(a__ , a__ )
def shard_on_the_fly( switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name : str = WEIGHTS_NAME ):
lowerCamelCase_ : Tuple = convert_file_size_to_int(a__ )
lowerCamelCase_ : Union[str, Any] = []
lowerCamelCase_ : Any = {}
lowerCamelCase_ : Any = 0
lowerCamelCase_ : str = 0
os.makedirs(a__ , exist_ok=a__ )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
lowerCamelCase_ : int = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
lowerCamelCase_ : Optional[Any] = flatten_dict(a__ , sep="""/""" )
lowerCamelCase_ : Any = {}
for layer in checkpoint_info.keys():
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = get_key_and_tensorstore_dict(
a__ , a__ , a__ )
if curr_real_layer_name in all_layers:
lowerCamelCase_ : Any = content
else:
lowerCamelCase_ : Optional[Any] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
lowerCamelCase_ : Tuple = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
lowerCamelCase_ : Optional[int] = torch.tensor(a__ )
lowerCamelCase_ : int = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
lowerCamelCase_, lowerCamelCase_ : Dict = rename_base_flax_keys(tuple(key.split("""/""" ) ) , a__ )
lowerCamelCase_ : str = """/""".join(a__ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
lowerCamelCase_ : int = os.path.join(
a__ , weights_name.replace(""".bin""" , f'''-{len(a__ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(a__ , a__ )
sharded_state_dicts.append(current_block.keys() )
del current_block
lowerCamelCase_ : Dict = {}
lowerCamelCase_ : Optional[int] = 0
lowerCamelCase_ : Optional[int] = raw_weights.to(getattr(a__ , a__ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
lowerCamelCase_ : List[str] = os.path.join(a__ , weights_name.replace(""".bin""" , f'''-{len(a__ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(a__ , a__ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(a__ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
lowerCamelCase_ : Optional[Any] = {}
lowerCamelCase_ : Optional[int] = {}
for idx, shard in enumerate(a__ ):
lowerCamelCase_ : Union[str, Any] = weights_name.replace(
""".bin""" , f'''-{idx+1:05d}-of-{len(a__ ):05d}.bin''' ) # len(sharded_state_dicts):05d}
lowerCamelCase_ : List[Any] = os.path.join(a__ , weights_name.replace(""".bin""" , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(a__ , os.path.join(a__ , a__ ) )
lowerCamelCase_ : List[Any] = shard
for key in shard:
lowerCamelCase_ : Optional[Any] = shard_file
# Add the metadata
lowerCamelCase_ : Union[str, Any] = {"""total_size""": total_size}
lowerCamelCase_ : Dict = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(a__ , a__ ) , """w""" , encoding="""utf-8""" ) as f:
lowerCamelCase_ : Optional[int] = json.dumps(a__ , indent=2 , sort_keys=a__ ) + """\n"""
f.write(a__ )
return metadata, index
if __name__ == "__main__":
snake_case__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
snake_case__ : List[str] = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __lowerCamelCase ( ) -> List[str]:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
lowerCamelCase_ : Optional[int] = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
lowerCamelCase_ : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
lowerCamelCase_ : Union[str, Any] = TaTokenizer.from_pretrained("""t5-small""" )
lowerCamelCase_ : Optional[Any] = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
lowerCamelCase_ : Union[str, Any] = tokenizer(a__ , return_tensors="""pt""" ).input_ids
lowerCamelCase_ : Tuple = model.generate(a__ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
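# A small illustration of the two size helpers used above (both are real
# `transformers` utilities imported at the top of this script; the numbers are
# only an example):
def demo_shard_accounting():
    weight = torch.zeros(1000, 1000, dtype=torch.bfloat16)
    weight_size = weight.numel() * dtype_byte_size(weight.dtype)  # 2 bytes / element
    budget = convert_file_size_to_int("10GB")  # "10GB" -> 10 * 10**9 bytes
    print(weight_size, budget, weight_size < budget)  # 2000000 10000000000 True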
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
A : Dict = logging.get_logger(__name__)
A : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
for attribute in key.split('''.''' ):
__a = getattr(a__ , a__ )
if weight_type is not None:
__a = getattr(a__ , a__ ).shape
else:
__a = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ):
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == '''group''' , )
__a = True
else:
for key, mapped_key in MAPPING.items():
__a = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
__a = True
if "*" in mapped_key:
__a = name.split(a__ )[0].split('''.''' )[-2]
__a = mapped_key.replace('''*''' , a__ )
if "weight_g" in name:
__a = '''weight_g'''
elif "weight_v" in name:
__a = '''weight_v'''
elif "weight" in name:
__a = '''weight'''
elif "bias" in name:
__a = '''bias'''
else:
__a = None
set_recursively(a__ , a__ , a__ , a__ , a__ )
continue
if not is_used:
unused_weights.append(a__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
__a = full_name.split('''conv_layers.''' )[-1]
__a = name.split('''.''' )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__a = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__a = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__a = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__a = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a__ )
@torch.no_grad()
def convert_hubert_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
if config_path is not None:
__a = HubertConfig.from_pretrained(a__ )
else:
__a = HubertConfig()
if is_finetuned:
if dict_path:
__a = Dictionary.load(a__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a = target_dict.pad_index
__a = target_dict.bos_index
__a = target_dict.eos_index
__a = len(target_dict.symbols )
__a = os.path.join(a__ , '''vocab.json''' )
if not os.path.isdir(a__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(a__ ) )
return
os.makedirs(a__ , exist_ok=a__ )
with open(a__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , a__ )
__a = WavaVecaCTCTokenizer(
a__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=a__ , )
__a = True if config.feat_extract_norm == '''layer''' else False
__a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=a__ , return_attention_mask=a__ , )
__a = WavaVecaProcessor(feature_extractor=a__ , tokenizer=a__ )
processor.save_pretrained(a__ )
__a = HubertForCTC(a__ )
else:
__a = HubertModel(a__ )
if is_finetuned:
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__a = model[0].eval()
recursively_load_weights(a__ , a__ , a__ )
hf_wavavec.save_pretrained(a__ )
if __name__ == "__main__":
A : Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
A : str = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
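# Example invocation (a sketch: the flags are the ones defined above, the paths are
# placeholders, and the script filename is assumed):
#
#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-converted \
#       --not_finetuned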
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
def __init__( self :Optional[int] ):
'''simple docstring'''
lowercase__ = ""
lowercase__ = ""
lowercase__ = []
lowercase__ = 0
lowercase__ = 2_56
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
    def stretch( self :List[str] , _lowercase :Any ):
'''simple docstring'''
lowercase__ = cva.imread(_lowercase , 0 )
lowercase__ = copy.deepcopy(self.img )
lowercase__ , lowercase__ , lowercase__ = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label="x" )
lowercase__ = np.sum(_lowercase )
for i in range(len(_lowercase ) ):
lowercase__ = x[i] / self.k
self.sk += prk
lowercase__ = (self.L - 1) * self.sk
if self.rem != 0:
lowercase__ = int(last % last )
lowercase__ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(_lowercase )
lowercase__ = int(np.ma.count(self.img ) / self.img[1].size )
lowercase__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowercase__ = self.img[j][i]
if num != self.last_list[num]:
lowercase__ = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
    def plot_histogram( self :Dict ):
'''simple docstring'''
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
    def show_image( self :Tuple ):
'''simple docstring'''
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
    _snake_case = os.path.join(os.path.dirname(__file__), """image_data/input.jpg""")
_snake_case = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_snake_case = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config( self :List[str] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
lowercase__ = 2
lowercase__ = inputs["input_ids"].shape[-1] // 2
lowercase__ = inputs["input_ids"][:max_batch_size, :sequence_length]
lowercase__ = jnp.ones_like(_lowercase )
lowercase__ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
lowercase__ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
lowercase__ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = False
lowercase__ = max_length
lowercase__ = 0
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase__ = getattr(_lowercase , _lowercase )
lowercase__ = pt_model_class(_lowercase ).eval()
lowercase__ = load_flax_weights_in_pytorch_model(_lowercase , flax_model.params )
lowercase__ = flax_model.generate(_lowercase ).sequences
lowercase__ = pt_model.generate(torch.tensor(_lowercase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowercase__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = False
lowercase__ = max_length
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = True
lowercase__ = max_length
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = False
lowercase__ = max_length
lowercase__ = 2
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = False
lowercase__ = max_length
lowercase__ = 2
lowercase__ = 2
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = True
lowercase__ = max_length
lowercase__ = 0.8
lowercase__ = 10
lowercase__ = 0.3
lowercase__ = 1
lowercase__ = 8
lowercase__ = 9
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = max_length
lowercase__ = 1
lowercase__ = 8
lowercase__ = 9
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = max_length
lowercase__ = 2
lowercase__ = 1
lowercase__ = 8
lowercase__ = 9
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase__ = attention_mask.at[(0, 0)].set(0 )
lowercase__ = False
lowercase__ = max_length
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase__ = attention_mask.at[(0, 0)].set(0 )
lowercase__ = True
lowercase__ = max_length
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase__ = attention_mask.at[(0, 0)].set(0 )
lowercase__ = 2
lowercase__ = max_length
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self :int ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
lowercase__ = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
lowercase__ = "Hello world"
lowercase__ = tokenizer(_lowercase , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_lowercase , "do_samples" ):
model.generate(_lowercase , do_samples=_lowercase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_lowercase , "foo" ):
lowercase__ = {"foo": "bar"}
model.generate(_lowercase , **_lowercase )
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
UpperCAmelCase : Dict = True
from torch.cuda.amp import autocast
UpperCAmelCase : str = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
"""simple docstring"""
__a = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__a = field(
default=A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__a = field(
default=A , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
__a = field(
default=0.1 , metadata={"""help""": """The dropout ratio for the attention probabilities."""} )
__a = field(
default=0.1 , metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""} )
__a = field(
default=0.1 , metadata={
"""help""": """The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."""
} , )
__a = field(
default=0.1 , metadata={"""help""": """The dropout probabilitiy for all 1D convolutional layers in feature extractor."""} , )
__a = field(
default=0.0_5 , metadata={
"""help""": (
"""Propability of each feature vector along the time axis to be chosen as the start of the vector"""
"""span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"""
"""vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."""
)
} , )
__a = field(default=0.0 , metadata={"""help""": """The LayerDrop probability."""} )
@dataclass
class DataTrainingArguments:
"""simple docstring"""
__a = field(
default=A , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__a = field(
default="""train+validation""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
__a = field(
default=A , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
__a = field(
default=A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__a = field(
default=A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__a = field(
default=A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of validation examples to this """
"""value if set."""
)
} , )
__a = list_field(
default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""] , metadata={"""help""": """A list of characters to remove from the transcripts."""} , )
@dataclass
class DataCollatorCTCWithPadding:
"""simple docstring"""
    processor: WavaVecaProcessor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None
def __call__( self : Dict , UpperCamelCase : List[Dict[str, Union[List[int], torch.Tensor]]] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = [{"""input_values""": feature["""input_values"""]} for feature in features]
__UpperCAmelCase : Any = [{"""input_ids""": feature["""labels"""]} for feature in features]
__UpperCAmelCase : Union[str, Any] = self.processor.pad(
UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
__UpperCAmelCase : Tuple = self.processor.pad(
labels=UpperCamelCase , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="""pt""" , )
# replace padding with -100 to ignore loss correctly
__UpperCAmelCase : Union[str, Any] = labels_batch["""input_ids"""].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
__UpperCAmelCase : Any = labels
return batch
class CTCTrainer( Trainer ):
"""simple docstring"""
    def training_step( self : Union[str, Any] , UpperCamelCase : nn.Module , UpperCamelCase : Dict[str, Union[torch.Tensor, Any]] ):
'''simple docstring'''
model.train()
__UpperCAmelCase : List[str] = self._prepare_inputs(UpperCamelCase )
if self.use_amp:
with autocast():
__UpperCAmelCase : Union[str, Any] = self.compute_loss(UpperCamelCase , UpperCamelCase )
else:
__UpperCAmelCase : Any = self.compute_loss(UpperCamelCase , UpperCamelCase )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
__UpperCAmelCase : str = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__UpperCAmelCase : List[str] = loss.sum() / (inputs["""labels"""] >= 0).sum()
else:
raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
__UpperCAmelCase : int = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(UpperCamelCase ).backward()
elif self.use_apex:
with amp.scale_loss(UpperCamelCase , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(UpperCamelCase )
else:
loss.backward()
return loss.detach()
def main() -> str:
'''simple docstring'''
__UpperCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__UpperCAmelCase : Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCAmelCase : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , _UpperCamelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__UpperCAmelCase : Optional[int] = datasets.load_dataset(
"""common_voice""" , data_args.dataset_config_name , split=data_args.train_split_name )
__UpperCAmelCase : str = datasets.load_dataset("""common_voice""" , data_args.dataset_config_name , split="""test""" )
# Create and save tokenizer
__UpperCAmelCase : Any = f'''[{"".join(data_args.chars_to_ignore )}]'''
def remove_special_characters(_UpperCamelCase : Any ):
__UpperCAmelCase : int = re.sub(_UpperCamelCase , """""" , batch["""sentence"""] ).lower() + """ """
return batch
__UpperCAmelCase : Union[str, Any] = train_dataset.map(_UpperCamelCase , remove_columns=["""sentence"""] )
__UpperCAmelCase : str = eval_dataset.map(_UpperCamelCase , remove_columns=["""sentence"""] )
def extract_all_chars(_UpperCamelCase : Optional[Any] ):
__UpperCAmelCase : Any = """ """.join(batch["""text"""] )
__UpperCAmelCase : str = list(set(_UpperCamelCase ) )
return {"vocab": [vocab], "all_text": [all_text]}
__UpperCAmelCase : Union[str, Any] = train_dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , batch_size=-1 , keep_in_memory=_UpperCamelCase , remove_columns=train_dataset.column_names , )
__UpperCAmelCase : str = train_dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , batch_size=-1 , keep_in_memory=_UpperCamelCase , remove_columns=eval_dataset.column_names , )
__UpperCAmelCase : List[str] = list(set(vocab_train["""vocab"""][0] ) | set(vocab_test["""vocab"""][0] ) )
__UpperCAmelCase : Optional[Any] = {v: k for k, v in enumerate(_UpperCamelCase )}
__UpperCAmelCase : Dict = vocab_dict[""" """]
del vocab_dict[" "]
__UpperCAmelCase : Any = len(_UpperCamelCase )
__UpperCAmelCase : Tuple = len(_UpperCamelCase )
with open("""vocab.json""" , """w""" ) as vocab_file:
json.dump(_UpperCamelCase , _UpperCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase : Dict = WavaVecaCTCTokenizer(
"""vocab.json""" , unk_token="""[UNK]""" , pad_token="""[PAD]""" , word_delimiter_token="""|""" , )
__UpperCAmelCase : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0.0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase )
__UpperCAmelCase : Optional[Any] = WavaVecaProcessor(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase )
__UpperCAmelCase : List[Any] = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="""mean""" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
__UpperCAmelCase : Optional[int] = min(len(_UpperCamelCase ) , data_args.max_train_samples )
__UpperCAmelCase : Optional[int] = train_dataset.select(range(_UpperCamelCase ) )
if data_args.max_val_samples is not None:
__UpperCAmelCase : Dict = eval_dataset.select(range(data_args.max_val_samples ) )
resampler = torchaudio.transforms.Resample(4_8_0_0_0, 1_6_0_0_0)
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    batch["sampling_rate"] = 1_6_0_0_0
    batch["target_text"] = batch["text"]
    return batch
__UpperCAmelCase : Tuple = train_dataset.map(
_UpperCamelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
__UpperCAmelCase : List[str] = eval_dataset.map(
_UpperCamelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(batch):
    # check that all files have the correct sampling rate
    assert (
        len(set(batch["sampling_rate"])) == 1
    ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
    processed_batch = processor(
        audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
    )
    batch.update(processed_batch)
    return batch
__UpperCAmelCase : Optional[int] = train_dataset.map(
_UpperCamelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_UpperCamelCase , num_proc=data_args.preprocessing_num_workers , )
__UpperCAmelCase : Optional[Any] = eval_dataset.map(
_UpperCamelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_UpperCamelCase , num_proc=data_args.preprocessing_num_workers , )
# Metric
wer_metric = datasets.load_metric("wer")
def compute_metrics(pred):
    pred_logits = pred.predictions
    pred_ids = np.argmax(pred_logits, axis=-1)
    pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
    pred_str = processor.batch_decode(pred_ids)
    # we do not want to group tokens when computing the metrics
    label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
    wer = wer_metric.compute(predictions=pred_str, references=label_str)
    return {"wer": wer}
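# Illustrative example (added, not part of the original script): for a single pair,
#   wer_metric.compute(predictions=["hello wrld"], references=["hello world"])
# returns 0.5 -- one substituted word out of two reference words.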
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__UpperCAmelCase : List[str] = DataCollatorCTCWithPadding(processor=_UpperCamelCase , padding=_UpperCamelCase )
# Initialize our Trainer
__UpperCAmelCase : Union[str, Any] = CTCTrainer(
model=_UpperCamelCase , data_collator=_UpperCamelCase , args=_UpperCamelCase , compute_metrics=_UpperCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__UpperCAmelCase : Optional[int] = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__UpperCAmelCase : int = model_args.model_name_or_path
else:
__UpperCAmelCase : Tuple = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__UpperCAmelCase : int = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
__UpperCAmelCase : Union[str, Any] = train_result.metrics
__UpperCAmelCase : Any = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCamelCase )
)
__UpperCAmelCase : Union[str, Any] = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.log_metrics("""train""" , _UpperCamelCase )
trainer.save_metrics("""train""" , _UpperCamelCase )
trainer.save_state()
# Evaluation
__UpperCAmelCase : Any = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__UpperCAmelCase : Any = trainer.evaluate()
__UpperCAmelCase : Union[str, Any] = data_args.max_val_samples if data_args.max_val_samples is not None else len(_UpperCamelCase )
__UpperCAmelCase : Any = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.log_metrics("""eval""" , _UpperCamelCase )
trainer.save_metrics("""eval""" , _UpperCamelCase )
return results
if __name__ == "__main__":
main()
| 139
|
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 139
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_lowerCAmelCase = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
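# Usage note (illustrative, added): instantiating the deprecated class still works but emits a
# FutureWarning, e.g.
#   feature_extractor = PoolFormerFeatureExtractor()  # warns; prefer PoolFormerImageProcessor()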
| 704
|
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
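# Illustrative sketch (added, not part of the original script) of the checkpoint_version >= 2.0
# branch: a flat [num_heads * num_splits * hidden_size, :] tensor is regrouped into
# [num_splits * num_heads * hidden_size, :]. With num_heads=2, num_splits=3, hidden_size=4
# and a [24, 8] input, the steps amount to
#   x.view(2, 3, 4, 8).transpose(0, 1).contiguous().view(24, 8)
# i.e. the heads axis and the splits axis swap places before flattening back.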
def convert_megatron_checkpoint(args, input_state_dict, config):
# The converted output model.
lowercase : str = {}
# old versions did not store training args
lowercase : Optional[int] = input_state_dict.get("""args""" , _A )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowercase : List[Any] = ds_args.padded_vocab_size
lowercase : int = ds_args.max_position_embeddings
lowercase : Optional[Any] = ds_args.hidden_size
lowercase : int = ds_args.num_layers
lowercase : Union[str, Any] = ds_args.num_attention_heads
lowercase : List[str] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowercase : int = config.n_head
# The hidden_size per head.
lowercase : Union[str, Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowercase : List[str] = input_state_dict["""checkpoint_version"""]
else:
lowercase : List[str] = 0.0
# The model.
lowercase : Tuple = input_state_dict["""model"""]
# The language model.
lowercase : Optional[int] = model["""language_model"""]
# The embeddings.
lowercase : Optional[int] = lm["""embedding"""]
# The word embeddings.
lowercase : Union[str, Any] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
lowercase : Tuple = word_embeddings[: config.vocab_size, :]
lowercase : Tuple = word_embeddings
# The position embeddings.
lowercase : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowercase : Any = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
# Store the position embeddings.
lowercase : Optional[int] = pos_embeddings
# The transformer.
lowercase : str = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
lowercase : str = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
lowercase : Optional[Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowercase : int = layer_re.match(_A )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowercase : Optional[int] = int(m.group(1 ) )
# The name of the operation.
lowercase : Union[str, Any] = m.group(2 )
# Is it a weight or a bias?
lowercase : Dict = m.group(3 )
# The name of the layer.
lowercase : List[Any] = F"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
lowercase : List[str] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
lowercase : Dict = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowercase : Optional[int] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _A , _A )
lowercase : List[str] = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowercase : str = torch.tensor(-1e4 , dtype=torch.floataa )
lowercase : Tuple = masked_bias
lowercase : str = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowercase : int = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowercase : List[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowercase : str = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Store. No change of shape.
lowercase : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowercase : Optional[int] = megatron_to_transformers[op_name]
lowercase : int = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowercase : Union[str, Any] = megatron_to_transformers[op_name]
lowercase : str = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowercase : Dict = transformer["""final_layernorm.weight"""]
lowercase : Any = transformer["""final_layernorm.bias"""]
# For the LM head, transformers wants the weight matrix tied to the word embeddings.
lowercase : int = word_embeddings
# It should be done!
return output_state_dict
def main():
# Create the argument parser.
lowercase : Dict = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=_A , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=_A , help="""An optional config json file describing the pre-trained model.""" , )
lowercase : Dict = parser.parse_args()
# Extract the basename.
lowercase : Union[str, Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
lowercase : Any = torch.load(_A , map_location="""cpu""" )
else:
lowercase : Tuple = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
lowercase : Dict = input_state_dict.get("""args""" , _A )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowercase : Optional[int] = """gelu_fast"""
elif ds_args.openai_gelu:
lowercase : int = """gelu_new"""
else:
lowercase : Tuple = """gelu"""
else:
# in the very early days this used to be "gelu_new"
lowercase : List[str] = """gelu_new"""
# Spell out all parameters in case the defaults change.
lowercase : Optional[Any] = GPTaConfig(
vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=_A , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=_A , summary_activation=_A , summary_proj_to_labels=_A , summary_first_dropout=0.1 , scale_attn_weights=_A , use_cache=_A , bos_token_id=50_256 , eos_token_id=50_256 , )
else:
lowercase : int = GPTaConfig.from_json_file(args.config_file )
lowercase : Optional[Any] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
lowercase : List[str] = convert_megatron_checkpoint(_A , _A , _A )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_A , _A )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowercase : Optional[Any] = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowercase : Tuple = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
lowercase : Optional[int] = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"""Unrecognized tokenizer_type {tokenizer_type}""" )
else:
lowercase : Optional[Any] = """gpt2"""
lowercase : int = AutoTokenizer.from_pretrained(_A )
lowercase : Union[str, Any] = type(_A ).__name__
lowercase : Any = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(_A )
# Save tokenizer based on args
print(F"""Adding {tokenizer_class} tokenizer files""" )
tokenizer.save_pretrained(_A )
# Store the state_dict to file.
lowercase : Any = os.path.join(_A , """pytorch_model.bin""" )
print(F"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
torch.save(_A , _A )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 348
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[str] = tempfile.mkdtemp()
# fmt: off
__snake_case : List[Any] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
__snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
__snake_case : Tuple = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
__snake_case : int = os.path.join(self.tmpdirname , _UpperCAmelCase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self , **_UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , **_UpperCAmelCase ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self ):
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : Tuple = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self ):
__snake_case : int = self.get_tokenizer()
__snake_case : Tuple = self.get_image_processor()
__snake_case : List[str] = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
__snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : Optional[int] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__snake_case : List[Any] = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__snake_case : str = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Any = self.get_image_processor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : List[str] = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__snake_case : Union[str, Any] = self.prepare_image_inputs()
__snake_case : Union[str, Any] = image_processor(_UpperCAmelCase , return_tensors='np' )
__snake_case : Optional[Any] = processor(images=_UpperCAmelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.get_image_processor()
__snake_case : int = self.get_tokenizer()
__snake_case : str = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__snake_case : Dict = 'lower newer'
__snake_case : Optional[int] = processor(text=_UpperCAmelCase )
__snake_case : str = tokenizer(_UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self ):
__snake_case : Any = self.get_image_processor()
__snake_case : Dict = self.get_tokenizer()
__snake_case : List[Any] = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__snake_case : Union[str, Any] = 'lower newer'
__snake_case : List[str] = self.prepare_image_inputs()
__snake_case : Tuple = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(_UpperCAmelCase ):
processor()
def lowercase_ ( self ):
__snake_case : int = self.get_image_processor()
__snake_case : Any = self.get_tokenizer()
__snake_case : int = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__snake_case : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : Union[str, Any] = processor.batch_decode(_UpperCAmelCase )
__snake_case : Union[str, Any] = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = self.get_image_processor()
__snake_case : Optional[int] = self.get_tokenizer()
__snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__snake_case : str = 'lower newer'
__snake_case : Dict = self.prepare_image_inputs()
__snake_case : Optional[int] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 576
|
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
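# Worked example (added for illustration): for p = 5, m = 2**5 - 1 = 31 and the sequence
# runs s = 4 -> 14 -> 8 -> 0 over the p - 2 = 3 iterations, so 31 is a Mersenne prime.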
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 576
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( a , unittest.TestCase ):
_UpperCamelCase = GPTaTokenizer
_UpperCamelCase = GPTaTokenizerFast
_UpperCamelCase = True
_UpperCamelCase = {"""add_prefix_space""": True}
_UpperCamelCase = False
def snake_case ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
A : Union[str, Any] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
A : Optional[int] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
A : Optional[Any] = {'''unk_token''': '''<unk>'''}
A : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_UpperCAmelCase ) )
def snake_case ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def snake_case ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def snake_case ( self , _UpperCAmelCase ):
A : List[Any] = '''lower newer'''
A : List[Any] = '''lower newer'''
return input_text, output_text
def snake_case ( self ):
A : List[str] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A : Optional[Any] = '''lower newer'''
A : Optional[int] = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
A : List[str] = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
A : Optional[int] = tokens + [tokenizer.unk_token]
A : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def snake_case ( self ):
if not self.test_rust_tokenizer:
return
A : Union[str, Any] = self.get_tokenizer()
A : Optional[int] = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase )
A : str = '''lower newer'''
# Testing tokenization
A : Optional[int] = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
A : Union[str, Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids without special tokens
A : int = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
A : str = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids with special tokens
A : Any = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase )
A : int = tokenizer.encode(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
A : int = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing the unknown token
A : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
A : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
        # It's very difficult to mix/test pretokenization with byte-level encoding and get both
        # GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string).
pass
def snake_case ( self , _UpperCAmelCase=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
# Simple input
A : Optional[int] = '''This is a simple input'''
A : Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
A : Any = ('''This is a simple input''', '''This is a pair''')
A : Tuple = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' , )
def snake_case ( self ):
A : Tuple = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
A : List[str] = '''This is a simple input'''
A : Optional[int] = ['''This is a simple input looooooooong''', '''This is a simple input''']
A : Optional[int] = ('''This is a simple input''', '''This is a pair''')
A : Optional[Any] = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
A : Optional[int] = tokenizer.pad_token_id
A : Any = tokenizer(_UpperCAmelCase , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
A : Optional[Any] = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors='''np''' )
A : Optional[int] = tokenizer(*_UpperCAmelCase , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
A : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def snake_case ( self ):
A : Any = '''$$$'''
A : int = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=_UpperCAmelCase , add_bos_token=_UpperCAmelCase )
A : Any = '''This is a simple input'''
A : Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
A : int = tokenizer.bos_token_id
A : int = tokenizer(_UpperCAmelCase )
A : Dict = tokenizer(_UpperCAmelCase )
self.assertEqual(out_s.input_ids[0] , _UpperCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
A : List[Any] = tokenizer.decode(out_s.input_ids )
A : Dict = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _UpperCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def snake_case ( self ):
pass
def snake_case ( self ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
A : str = [self.get_tokenizer(do_lower_case=_UpperCAmelCase , add_bos_token=_UpperCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
A : Optional[int] = '''Encode this.'''
A : Dict = '''This one too please.'''
A : Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
encoded_sequence += tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
A : int = tokenizer.encode_plus(
_UpperCAmelCase , _UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , )
A : Dict = encoded_sequence_dict['''input_ids''']
A : Tuple = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
A : Optional[int] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_UpperCAmelCase )
]
A : str = [x for x in filtered_sequence if x is not None]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@require_tokenizers
class _lowercase ( unittest.TestCase ):
def snake_case ( self ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
A : str = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=_UpperCAmelCase )
A : Optional[Any] = '''A photo of a cat'''
A : Optional[int] = tokenizer.encode(
_UpperCAmelCase , )
self.assertEqual(_UpperCAmelCase , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained('''test_opt''' )
A : List[Any] = AutoTokenizer.from_pretrained('''./test_opt''' )
A : Tuple = tokenizer.encode(
_UpperCAmelCase , )
self.assertEqual(_UpperCAmelCase , [2, 250, 1_345, 9, 10, 4_758] )
def snake_case ( self ):
A : Tuple = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=_UpperCAmelCase )
A : int = '''A photo of a cat'''
A : Optional[Any] = tokenizer.encode(
_UpperCAmelCase , )
# Same as above
self.assertEqual(_UpperCAmelCase , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def snake_case ( self ):
A : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=_UpperCAmelCase )
A : str = '''bos'''
A : Optional[Any] = tokenizer.get_vocab()['''bos''']
A : List[str] = '''A photo of a cat'''
A : List[Any] = tokenizer.encode(
_UpperCAmelCase , )
# We changed the bos token
self.assertEqual(_UpperCAmelCase , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained('''./tok''' )
A : List[str] = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
A : List[str] = tokenizer.encode(
_UpperCAmelCase , )
self.assertEqual(_UpperCAmelCase , [31_957, 250, 1_345, 9, 10, 4_758] )
| 709
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
snake_case_ = ["""text""", """image""", """audio"""]
def _lowerCamelCase( UpperCamelCase__ : List[str] ) -> str:
A : int = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_000 ) )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
inputs.append(create_inputs(UpperCamelCase__ ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def _lowerCamelCase( UpperCamelCase__ : List ) -> Tuple:
A : Optional[int] = []
for output in outputs:
if isinstance(UpperCamelCase__ , (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(UpperCamelCase__ , (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(UpperCamelCase__ , (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
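# Illustrative usage (added, not part of the original tests):
#   create_inputs(["text", "audio"])             # -> ["Text input", torch.ones(3000)]
#   output_types(["hi there", torch.ones(10)])   # -> ["text", "audio"]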
@is_tool_test
class _lowercase :
def snake_case ( self ):
self.assertTrue(hasattr(self.tool , '''inputs''' ) )
self.assertTrue(hasattr(self.tool , '''outputs''' ) )
A : Any = self.tool.inputs
for _input in inputs:
if isinstance(_input , _UpperCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
A : Tuple = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def snake_case ( self ):
A : Any = create_inputs(self.tool.inputs )
A : Dict = self.tool(*_UpperCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
A : Optional[int] = [outputs]
self.assertListEqual(output_types(_UpperCAmelCase ) , self.tool.outputs )
def snake_case ( self ):
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
def snake_case ( self ):
A : List[str] = create_inputs(self.tool.inputs )
A : Dict = self.tool(*_UpperCAmelCase )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
A : int = [outputs]
self.assertEqual(len(_UpperCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(_UpperCAmelCase , self.tool.outputs ):
A : Tuple = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(_UpperCAmelCase , _UpperCAmelCase ) )
def snake_case ( self ):
A : Tuple = create_inputs(self.tool.inputs )
A : Dict = []
for _input, input_type in zip(_UpperCAmelCase , self.tool.inputs ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
A : List[Any] = self.tool(*_UpperCAmelCase )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
A : Any = [outputs]
self.assertEqual(len(_UpperCAmelCase ) , len(self.tool.outputs ) )
| 537
| 0
|
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _snake_case :
def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=30 ,_snake_case=2 ,_snake_case=3 ,_snake_case=True ,_snake_case=True ,_snake_case=32 ,_snake_case=5 ,_snake_case=4 ,_snake_case=37 ,_snake_case="gelu" ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=10 ,_snake_case=0.02 ,_snake_case=3 ,_snake_case=None ,_snake_case=2 ,):
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Optional[Any] = batch_size
UpperCAmelCase_ : List[Any] = image_size
UpperCAmelCase_ : Dict = patch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = type_sequence_label_size
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : Dict = scope
UpperCAmelCase_ : str = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase_ : List[str] = (image_size // patch_size) ** 2
UpperCAmelCase_ : Union[str, Any] = num_patches + 2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : int = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : List[str] = DeiTModel(config=_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : Dict = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : int = DeiTForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : Dict = model(_snake_case )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : Any = 1
UpperCAmelCase_ : int = DeiTForMaskedImageModeling(_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Tuple = model(_snake_case )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Optional[int] = self.type_sequence_label_size
UpperCAmelCase_ : List[Any] = DeiTForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : Dict = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : Tuple = DeiTForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : int = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self ):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Optional[Any] =(
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__A : str =(
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__A : Optional[int] =False
__A : Optional[Any] =False
__A : List[str] =False
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = DeiTModelTester(self )
UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case ,hidden_size=37 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = model_class(_snake_case )
UpperCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case=False ):
UpperCAmelCase_ : str = super()._prepare_for_class(_snake_case ,_snake_case ,return_labels=_snake_case )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ):
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : int = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_snake_case )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase_ : Dict = model_class(_snake_case )
model.to(_snake_case )
model.train()
UpperCAmelCase_ : Any = self._prepare_for_class(_snake_case ,_snake_case ,return_labels=_snake_case )
UpperCAmelCase_ : Union[str, Any] = model(**_snake_case ).loss
loss.backward()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(_snake_case ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase_ : Optional[Any] = model_class(_snake_case )
model.gradient_checkpointing_enable()
model.to(_snake_case )
model.train()
UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(_snake_case ,_snake_case ,return_labels=_snake_case )
UpperCAmelCase_ : Dict = model(**_snake_case ).loss
loss.backward()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Any = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_snake_case ),
*get_values(_snake_case ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ):
UpperCAmelCase_ : Tuple = problem_type["title"]
UpperCAmelCase_ : Dict = problem_type["num_labels"]
UpperCAmelCase_ : int = model_class(_snake_case )
model.to(_snake_case )
model.train()
UpperCAmelCase_ : List[Any] = self._prepare_for_class(_snake_case ,_snake_case ,return_labels=_snake_case )
if problem_type["num_labels"] > 1:
UpperCAmelCase_ : Tuple = inputs["labels"].unsqueeze(1 ).repeat(1 ,problem_type["num_labels"] )
UpperCAmelCase_ : Optional[Any] = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_snake_case ) as warning_list:
UpperCAmelCase_ : Dict = model(**_snake_case ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCamelCase__ ( self ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[str] = DeiTModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def a__ ( ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case (unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.default_image_processor
UpperCAmelCase_ : Tuple = prepare_img()
UpperCAmelCase_ : Union[str, Any] = image_processor(images=_snake_case ,return_tensors="pt" ).to(_snake_case )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Any = model(**_snake_case )
# verify the logits
UpperCAmelCase_ : Optional[int] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape ,_snake_case )
UpperCAmelCase_ : Tuple = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" ,torch_dtype=torch.floataa ,device_map="auto" )
UpperCAmelCase_ : Optional[Any] = self.default_image_processor
UpperCAmelCase_ : List[str] = prepare_img()
UpperCAmelCase_ : List[str] = image_processor(images=_snake_case ,return_tensors="pt" )
UpperCAmelCase_ : Union[str, Any] = inputs.pixel_values.to(_snake_case )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(_snake_case )
| 71
|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 411
| 0
|
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    '''Return the characters of the two input strings interleaved.'''
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
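# Worked example (added for illustration): alternative_string_arrange("AB", "XYZ")
# interleaves to "AXBYZ" -- 'A'/'X', then 'B'/'Y', then only 'Z' once "AB" is exhausted.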
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
| 265
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : Optional[int] = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs, ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
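# Usage sketch (illustrative, not part of the original module): the constructor above
# enforces that two-stage Deformable DETR also uses box refinement.
# config = DeformableDetrConfig(two_stage=True, with_box_refine=True, num_queries=100)
# assert (config.hidden_size, config.num_attention_heads) == (256, 8)  # property aliases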
| 265
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 41
|
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal():
    """Kruskal's algorithm returns a minimum spanning tree for a small weighted graph."""
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result) == sorted(expected)
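# For context, a minimal Kruskal implementation compatible with the test above — a sketch
# only; the imported `graphs.minimum_spanning_tree_kruskal.kruskal` may differ in details
# such as tie-breaking between equal-weight edges.
def kruskal_sketch(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(x):  # path-compressing find
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):  # cheapest edges first
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # keep the edge only if it joins two components
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst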
| 436
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 5_1_2,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs, ):
        """simple docstring"""
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}")
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.")
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """simple docstring"""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}")
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 604
|
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` pence from standard British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_0_0) == 7_3_6_8_2
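    # Worked example (illustrative): for 5 pence the table counts the partitions
    # {5}, {2,2,1}, {2,1,1,1} and {1,1,1,1,1}, so solution(5) == 4.
    assert solution(5) == 4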
| 604
| 1
|
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
| 90
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class _A ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        '''simple docstring'''
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        '''simple docstring'''
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            }, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", )
    def save_results_as_csv(self, job_name):
        '''simple docstring'''
        TrainingJobAnalytics(job_name).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""")
    @parameterized.expand([(1,)] )
    def test_script(self, instance_count):
        '''simple docstring'''
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 402
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [13_84]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
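# How the toy vocabulary above tokenizes "apte" (illustrative walk-through): the merges
# "a p" and "t e</w>" pair up characters first, and since no "ap te" merge exists the word
# stays split as the two subwords "ap@@" and "te", where "@@" marks a non-final piece.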
| 704
|
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
_UpperCamelCase : Tuple = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
_UpperCamelCase : Union[str, Any] = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
CHECKPOINT_URLS = {
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').")
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}")
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 341
| 0
|
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Real power P = apparent power S x power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Reactive power Q = S x sqrt(1 - pf**2)."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
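    # Worked example (illustrative): a 100 VA load at power factor 0.8 draws 80 W of
    # real power and 60 var of reactive power (the 3-4-5 power triangle).
    assert round(real_power(100, 0.8), 6) == 80.0
    assert round(reactive_power(100, 0.8), 6) == 60.0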
| 303
|
def binary_multiply(a: int, b: int) -> int:
    """Multiply two non-negative integers by shift-and-add (Russian peasant method)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Compute (a * b) % c with shift-and-add, keeping intermediates below c."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
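# Worked example (illustrative trace of binary_multiply(3, 5)): b == 0b101, so the result
# accumulates a at bit 0 (3) and at bit 2 (12), giving 15.
assert binary_multiply(3, 5) == 15
assert binary_mod_multiply(3, 5, 7) == 15 % 7  # == 1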
| 303
| 1
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_inference_masked_lm(self):
        """simple docstring"""
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 5_0000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 193
|
import math
def real_power(apparent_power, power_factor):
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power, power_factor):
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 193
| 1
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase_ (unittest.TestCase ):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"])
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 95
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def _lowerCamelCase( a , a = "cpu" , a = None ):
__a = torch.load(a , map_location=a )
for k, v in tqdm(state_dict.items() ):
if not isinstance(a , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
__a = v.half()
if save_path is None: # overwrite src_path
__a = src_path
torch.save(a , a )
if __name__ == "__main__":
fire.Fire(convert)
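    # Usage sketch (illustrative; the script and checkpoint file names are placeholders):
    #   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin
    # Omitting --save_path halves the checkpoint in place.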
| 528
| 0
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 397
|
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 397
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=5_12, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs, ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [R"pooler", R"logit_scale"]
    _keys_to_ignore_on_load_missing = [R"position_ids", R"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, ):
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
| 147
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument(
        "--config_file", default=None, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    '''simple docstring'''
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    '''simple docstring'''
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 698
| 0
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
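    # Note (illustrative): the tree branches on "skip element" vs "take element" at every
    # index, so a sequence of length n prints all 2**n subsequences; [3, 1, 2, 4] yields
    # 16 lines, starting with [] and ending with [3, 1, 2, 4].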
| 669
|
def hamming(n_element: int) -> list:
    """Return the first n_element Hamming numbers (numbers of the form 2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 669
| 1
|
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making: spend the largest denominations first."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 59
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=5_1865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=5_0257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=5_0256, bos_token_id=5_0256, eos_token_id=5_0256, suppress_tokens=None, begin_suppress_tokens=[220, 5_0256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self):
        """simple docstring"""
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ])
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, is_pair=False, framework=None, sampling_rate=2_2050, time_duration=5.0, frequency=220, ):
        """simple docstring"""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self):
        """simple docstring"""
        return 1E-3
| 431
| 0
|
'''simple docstring'''
def solution(power: int = 1_000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
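    # Worked example (illustrative): 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26,
    # so solution(15) == 26; the loop peels one decimal digit per iteration.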
| 719
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    '''simple docstring'''
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        """simple docstring"""
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang)

    def forward(self, inputs):
        """simple docstring"""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """simple docstring"""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
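# Usage sketch (illustrative; the exact import path is an assumption and the NLLB
# checkpoint is downloaded on first use):
# from transformers.tools import TranslationTool
# translator = TranslationTool()
# print(translator("How are you?", src_lang="English", tgt_lang="French"))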
| 352
| 0
|
import math
class Graph:
    '''simple docstring'''
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0  # distance from a node to itself is zero

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
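    # Expected results for the example graph above (illustrative): the shortest 1 -> 4
    # route is 1 -> 3 -> 4 with cost 5 + 6 = 11, and 0 -> 3 goes 0 -> 2 -> 3 with cost
    # 9 + 7 = 16.
    print(graph.show_min(1, 4))  # 11
    print(graph.show_min(0, 3))  # 16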
| 214
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16_384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16_384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
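# Usage sketch (illustrative, not in the original file):
# config = CanineConfig(num_hash_buckets=8_192)
# config.save_pretrained("./canine-config")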
| 156
| 0
|
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE : Optional[Any] = """PoolFormerConfig"""
# Base docstring
SCREAMING_SNAKE_CASE : List[Any] = """sail/poolformer_s12"""
SCREAMING_SNAKE_CASE : Tuple = [1, 512, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE : Union[str, Any] = """sail/poolformer_s12"""
SCREAMING_SNAKE_CASE : List[str] = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE : Optional[Any] = [
"""sail/poolformer_s12""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob=0.0, training=False):
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
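# Reference note (illustrative, not in the original file): rescaling survivors
# by 1/keep_prob keeps the expected activation unchanged:
#   E[output] = keep_prob * (input / keep_prob) + (1 - keep_prob) * 0 = input
# which is why no extra correction is needed at inference time, where
# drop_path reduces to the identity.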
class A_ ( nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[float] = None ):
super().__init__()
__a = drop_prob
def _UpperCAmelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : torch.Tensor ):
return drop_path(__SCREAMING_SNAKE_CASE , self.drop_prob , self.training )
def _UpperCAmelCase ( self : List[Any] ):
return "p={}".format(self.drop_prob )
class A_ ( nn.Module ):
def __init__( self : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int]=None ):
super().__init__()
__a = patch_size if isinstance(__SCREAMING_SNAKE_CASE , collections.abc.Iterable ) else (patch_size, patch_size)
__a = stride if isinstance(__SCREAMING_SNAKE_CASE , collections.abc.Iterable ) else (stride, stride)
__a = padding if isinstance(__SCREAMING_SNAKE_CASE , collections.abc.Iterable ) else (padding, padding)
__a = nn.Conv2d(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=__SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
__a = norm_layer(__SCREAMING_SNAKE_CASE ) if norm_layer else nn.Identity()
def _UpperCAmelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ):
__a = self.projection(__SCREAMING_SNAKE_CASE )
__a = self.norm(__SCREAMING_SNAKE_CASE )
return embeddings
class A_ ( nn.GroupNorm ):
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : Optional[Any] ):
super().__init__(1 , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class A_ ( nn.Module ):
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Dict ):
super().__init__()
__a = nn.AvgPool2d(__SCREAMING_SNAKE_CASE , stride=1 , padding=pool_size // 2 , count_include_pad=__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] ):
return self.pool(__SCREAMING_SNAKE_CASE ) - hidden_states
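# Note (illustrative, not in the original file): subtracting the input turns
# average pooling into a pure token-mixing residual; the enclosing
# PoolFormerLayer adds `hidden_states` back through its residual connection,
# so the net effect is average pooling used in place of self-attention.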
class A_ ( nn.Module ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] ):
super().__init__()
__a = nn.Conv2d(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 1 )
__a = nn.Conv2d(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 1 )
__a = PoolFormerDropPath(__SCREAMING_SNAKE_CASE )
if isinstance(config.hidden_act , __SCREAMING_SNAKE_CASE ):
__a = ACT2FN[config.hidden_act]
else:
__a = config.hidden_act
def _UpperCAmelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str ):
__a = self.conva(__SCREAMING_SNAKE_CASE )
__a = self.act_fn(__SCREAMING_SNAKE_CASE )
__a = self.drop(__SCREAMING_SNAKE_CASE )
__a = self.conva(__SCREAMING_SNAKE_CASE )
__a = self.drop(__SCREAMING_SNAKE_CASE )
return hidden_states
class A_ ( nn.Module ):
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
super().__init__()
__a = PoolFormerPooling(__SCREAMING_SNAKE_CASE )
__a = PoolFormerOutput(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__a = PoolFormerGroupNorm(__SCREAMING_SNAKE_CASE )
__a = PoolFormerGroupNorm(__SCREAMING_SNAKE_CASE )
# Useful for training neural nets
__a = PoolFormerDropPath(__SCREAMING_SNAKE_CASE ) if drop_path > 0.0 else nn.Identity()
__a = config.use_layer_scale
if config.use_layer_scale:
__a = nn.Parameter(
config.layer_scale_init_value * torch.ones((__SCREAMING_SNAKE_CASE) ) , requires_grad=__SCREAMING_SNAKE_CASE )
__a = nn.Parameter(
config.layer_scale_init_value * torch.ones((__SCREAMING_SNAKE_CASE) ) , requires_grad=__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : int , __SCREAMING_SNAKE_CASE : str ):
if self.use_layer_scale:
__a = self.pooling(self.before_norm(__SCREAMING_SNAKE_CASE ) )
__a = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
__a = hidden_states + self.drop_path(__SCREAMING_SNAKE_CASE )
__a = ()
__a = self.output(self.after_norm(__SCREAMING_SNAKE_CASE ) )
__a = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
__a = hidden_states + self.drop_path(__SCREAMING_SNAKE_CASE )
__a = (output,) + outputs
return outputs
else:
__a = self.drop_path(self.pooling(self.before_norm(__SCREAMING_SNAKE_CASE ) ) )
# First residual connection
__a = pooling_output + hidden_states
__a = ()
# Second residual connection inside the PoolFormerOutput block
__a = self.drop_path(self.output(self.after_norm(__SCREAMING_SNAKE_CASE ) ) )
__a = hidden_states + layer_output
__a = (output,) + outputs
return outputs
class A_ ( nn.Module ):
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ):
super().__init__()
__a = config
# stochastic depth decay rule
__a = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
__a = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
__a = nn.ModuleList(__SCREAMING_SNAKE_CASE )
# Transformer blocks
__a = []
__a = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
__a = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__SCREAMING_SNAKE_CASE , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__SCREAMING_SNAKE_CASE ) )
__a = nn.ModuleList(__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : List[Any]=True ):
__a = () if output_hidden_states else None
__a = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
__a , __a = layers
# Get patch embeddings from hidden_states
__a = embedding_layer(__SCREAMING_SNAKE_CASE )
# Send the embeddings through the blocks
for _, blk in enumerate(__SCREAMING_SNAKE_CASE ):
__a = blk(__SCREAMING_SNAKE_CASE )
__a = layer_outputs[0]
if output_hidden_states:
__a = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__SCREAMING_SNAKE_CASE , hidden_states=__SCREAMING_SNAKE_CASE )
class A_ ( a_ ):
_SCREAMING_SNAKE_CASE = PoolFormerConfig
_SCREAMING_SNAKE_CASE = """poolformer"""
_SCREAMING_SNAKE_CASE = """pixel_values"""
_SCREAMING_SNAKE_CASE = True
def _UpperCAmelCase ( self : int , __SCREAMING_SNAKE_CASE : Any ):
if isinstance(__SCREAMING_SNAKE_CASE , (nn.Linear, nn.Conv2d) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__SCREAMING_SNAKE_CASE , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _UpperCAmelCase ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int=False ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__a = value
SCREAMING_SNAKE_CASE : Any = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE : Any = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
"""The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , a_ , )
class A_ ( a_ ):
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int ):
super().__init__(__SCREAMING_SNAKE_CASE )
__a = config
__a = PoolFormerEncoder(__SCREAMING_SNAKE_CASE )
# Initialize weights and apply final processing
self.post_init()
def _UpperCAmelCase ( self : Tuple ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _UpperCAmelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , ):
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
__a = self.encoder(
__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )
__a = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__SCREAMING_SNAKE_CASE , hidden_states=encoder_outputs.hidden_states , )
class A_ ( nn.Module ):
def __init__( self : int , __SCREAMING_SNAKE_CASE : Any ):
super().__init__()
__a = nn.Linear(config.hidden_size , config.hidden_size )
def _UpperCAmelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] ):
__a = self.dense(__SCREAMING_SNAKE_CASE )
return output
@add_start_docstrings(
"""
PoolFormer Model transformer with an image classification head on top
""" , a_ , )
class A_ ( a_ ):
def __init__( self : str , __SCREAMING_SNAKE_CASE : int ):
super().__init__(__SCREAMING_SNAKE_CASE )
__a = config.num_labels
__a = PoolFormerModel(__SCREAMING_SNAKE_CASE )
# Final norm
__a = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
__a = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _UpperCAmelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[torch.LongTensor] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , ):
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = self.poolformer(
__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )
__a = outputs[0]
__a = self.classifier(self.norm(__SCREAMING_SNAKE_CASE ).mean([-2, -1] ) )
__a = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__a = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__a = "single_label_classification"
else:
__a = "multi_label_classification"
if self.config.problem_type == "regression":
__a = MSELoss()
if self.num_labels == 1:
__a = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__a = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif self.config.problem_type == "single_label_classification":
__a = CrossEntropyLoss()
__a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__a = BCEWithLogitsLoss()
__a = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not return_dict:
__a = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states )
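# Note (illustrative, not part of the original file): the classifier head above
# applies global average pooling (the `.mean([-2, -1])` over the spatial
# dimensions of the final feature map) before the linear layer, instead of
# using a [CLS] token as ViT-style models do.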
| 525
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : str=5 , __SCREAMING_SNAKE_CASE : List[str]=4 , __SCREAMING_SNAKE_CASE : List[Any]=37 , __SCREAMING_SNAKE_CASE : int="gelu" , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=10 , __SCREAMING_SNAKE_CASE : int=0.02 , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : str=2 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
__a = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = (image_size // patch_size) ** 2
__a = num_patches + 1
    def prepare_config_and_inputs(self):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
    def get_config(self):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _UpperCAmelCase ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any ):
__a = ViTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__a = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict ):
__a = ViTForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__a = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a = 1
__a = ViTForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _UpperCAmelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ):
__a = self.type_sequence_label_size
__a = ViTForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = ViTForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A_ ( a_ , a_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def _UpperCAmelCase ( self : Optional[Any] ):
__a = ViTModelTester(self )
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def _UpperCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def _UpperCAmelCase ( self : Optional[Any] ):
pass
def _UpperCAmelCase ( self : int ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _UpperCAmelCase ( self : Optional[int] ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : List[str] ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Any ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Tuple ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _UpperCAmelCase ( self : Dict ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ViTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __A ( ):
"""simple docstring"""
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self : List[str] ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self : str ):
__a = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__SCREAMING_SNAKE_CASE )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__a = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__a = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
__a = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def _UpperCAmelCase ( self : Union[str, Any] ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# which allows interpolating the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
__a = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__SCREAMING_SNAKE_CASE )
__a = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=4_80 )
__a = prepare_img()
__a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" )
__a = inputs.pixel_values.to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__a = model(__SCREAMING_SNAKE_CASE , interpolate_pos_encoding=__SCREAMING_SNAKE_CASE )
# verify the logits
__a = torch.Size((1, 36_01, 3_84) )
self.assertEqual(outputs.last_hidden_state.shape , __SCREAMING_SNAKE_CASE )
__a = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _UpperCAmelCase ( self : Any ):
__a = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" )
__a = inputs.pixel_values.to(__SCREAMING_SNAKE_CASE )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__a = model(__SCREAMING_SNAKE_CASE )
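# Note (illustrative, not part of the original file): the fp16 forward pass
# above is a smoke test; it only checks that half-precision inference with
# `device_map="auto"` runs without raising, and compares no logits against
# reference values.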
| 525
| 1
|
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
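# Usage sketch (illustrative, not in the original file; assumes the
# repository's IIRFilter processes one sample at a time through a `process`
# method):
# filt = make_lowpass(1_000, 48_000)
# filtered = [filt.process(sample) for sample in samples]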
| 483
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with the stochastic sampler of Karras et al. (2022)."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output['derivative'],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
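# Usage sketch (illustrative, not in the original file; the checkpoint name is
# an assumption):
# pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
# image = pipe(batch_size=1, num_inference_steps=50).images[0]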
| 7
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __lowerCAmelCase ( __lowercase ):
_UpperCamelCase : int = """pix2struct_text_model"""
_UpperCamelCase : List[Any] = ["""past_key_values"""]
_UpperCamelCase : Union[str, Any] = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , snake_case=50_244 , snake_case=768 , snake_case=64 , snake_case=2_048 , snake_case=12 , snake_case=12 , snake_case=32 , snake_case=128 , snake_case=0.1 , snake_case=1E-6 , snake_case=1.0 , snake_case="gelu_new" , snake_case=0 , snake_case=False , snake_case=0 , snake_case=1 , snake_case=False , snake_case=True , **snake_case , ) -> str:
"""simple docstring"""
a__ : Any = vocab_size
a__ : List[str] = hidden_size
a__ : Tuple = d_kv
a__ : Dict = d_ff
a__ : str = num_layers
a__ : Any = num_heads
a__ : Optional[Any] = relative_attention_num_buckets
a__ : List[Any] = relative_attention_max_distance
a__ : str = dropout_rate
a__ : Tuple = layer_norm_epsilon
a__ : List[Any] = initializer_factor
a__ : Optional[int] = use_cache
a__ : str = eos_token_id
a__ : int = decoder_start_token_id
# for backwards compatibility
a__ : int = dense_act_fn
super().__init__(
pad_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , tie_word_embeddings=_A , is_decoder=_A , **_A , )
@classmethod
def _snake_case ( cls , snake_case , **snake_case ) -> List[str]:
"""simple docstring"""
cls._set_token_in_kwargs(_A )
a__ , a__ : List[Any] = cls.get_config_dict(_A , **_A )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
a__ : Union[str, Any] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class __lowerCAmelCase ( __lowercase ):
_UpperCamelCase : int = """pix2struct_vision_model"""
def __init__( self , snake_case=768 , snake_case=768 , snake_case=2_048 , snake_case=64 , snake_case=12 , snake_case=12 , snake_case="gelu_new" , snake_case=1E-6 , snake_case=0.0 , snake_case=0.0 , snake_case=1E-10 , snake_case=1.0 , snake_case=4_096 , snake_case=32 , snake_case=128 , **snake_case , ) -> int:
"""simple docstring"""
super().__init__(**_A )
a__ : str = hidden_size
a__ : List[str] = patch_embed_hidden_size
a__ : List[str] = d_ff
a__ : int = dropout_rate
a__ : Union[str, Any] = num_hidden_layers
a__ : Tuple = num_attention_heads
a__ : List[Any] = initializer_range
a__ : Optional[int] = initializer_factor
a__ : Optional[Any] = attention_dropout
a__ : List[Any] = layer_norm_eps
a__ : List[Any] = dense_act_fn
a__ : Tuple = seq_len
a__ : Dict = relative_attention_num_buckets
a__ : Optional[Any] = relative_attention_max_distance
a__ : Optional[Any] = d_kv
@classmethod
def _snake_case ( cls , snake_case , **snake_case ) -> Union[str, Any]:
"""simple docstring"""
cls._set_token_in_kwargs(_A )
a__ , a__ : Optional[int] = cls.get_config_dict(_A , **_A )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
a__ : Optional[int] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class __lowerCAmelCase ( __lowercase ):
_UpperCamelCase : Tuple = """pix2struct"""
_UpperCamelCase : List[str] = True
def __init__( self , snake_case=None , snake_case=None , snake_case=1.0 , snake_case=0.02 , snake_case=False , snake_case=False , snake_case=True , **snake_case , ) -> Any:
"""simple docstring"""
super().__init__(tie_word_embeddings=_A , is_encoder_decoder=_A , **_A )
if text_config is None:
a__ : str = {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
a__ : Any = {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
a__ : Dict = PixaStructTextConfig(**_A )
a__ : Optional[Any] = PixaStructVisionConfig(**_A )
a__ : Optional[Any] = self.text_config.decoder_start_token_id
a__ : Tuple = self.text_config.pad_token_id
a__ : List[str] = self.text_config.eos_token_id
a__ : List[Any] = initializer_factor
a__ : Any = initializer_range
a__ : Dict = self.initializer_range
a__ : Dict = self.initializer_range
a__ : str = is_vqa
@classmethod
def _snake_case ( cls , snake_case , snake_case , **snake_case ) -> str:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_A )
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ : Tuple = copy.deepcopy(self.__dict__ )
a__ : int = self.text_config.to_dict()
a__ : str = self.vision_config.to_dict()
a__ : Any = self.__class__.model_type
return output
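# Usage sketch (illustrative, not in the original file; class names follow the
# upstream library, while they appear obfuscated in the snippet above):
# text_cfg = Pix2StructTextConfig()
# vision_cfg = Pix2StructVisionConfig()
# cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)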
| 705
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCAmelCase ( _UpperCamelCase ):
@require_torch
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
a__ : str = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
a__ : Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
a__ : Tuple = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(snake_case )
BertModel.from_pretrained(snake_case )
BertTokenizer.from_pretrained(snake_case )
pipeline(task="fill-mask" , model=snake_case )
# baseline - just load from_pretrained with normal network
a__ : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
a__ : Dict = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__ : Tuple = "1"
a__ : List[Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
a__ : Optional[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
a__ : Tuple = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
a__ : List[Any] = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(snake_case )
BertModel.from_pretrained(snake_case )
BertTokenizer.from_pretrained(snake_case )
pipeline(task="fill-mask" , model=snake_case )
# baseline - just load from_pretrained with normal network
a__ : Tuple = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
a__ : Any = self.get_env()
a__ : int = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
a__ : Dict = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
a__ : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
a__ : List[Any] = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
a__ : str = self.get_env()
a__ : List[str] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
a__ : List[Any] = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__ : Union[str, Any] = "1"
a__ : Tuple = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ : Optional[Any] = "\nfrom transformers import pipeline\n "
a__ : int = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
a__ : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
a__ : List[str] = self.get_env()
a__ : Union[str, Any] = "1"
a__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )]
a__ : Any = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : Any = "\nfrom transformers import AutoModel\n "
a__ : Any = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
a__ : List[str] = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
a__ : Optional[Any] = self.get_env()
a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__ : Dict = "1"
a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
| 629
| 0
|
from __future__ import annotations
__author__ = 'Muhammad Umer Farooq'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = 'Muhammad Umer Farooq'
__email__ = 'contact@muhammadumerfarooq.me'
__status__ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split('.')[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall('[a-zA-Z0-9]+@' + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
emails = emails_from_url('https://github.com')
print(F'''{len(emails)} emails found:''')
print('\n'.join(sorted(emails)))
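# Note (illustrative, not in the original file): the regex above only captures
# alphanumeric local parts followed by the bare domain; a fuller, still
# approximate pattern would be:
# EMAIL_RE = re.compile(r"[A-Za-z0-9._%+-]+@" + re.escape(domain))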
| 57
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = """transfo-xl"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""mems"""]
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowerCAmelCase__=267_735 , lowerCAmelCase__=[20_000, 40_000, 200_000] , lowerCAmelCase__=1_024 , lowerCAmelCase__=1_024 , lowerCAmelCase__=16 , lowerCAmelCase__=64 , lowerCAmelCase__=4_096 , lowerCAmelCase__=4 , lowerCAmelCase__=False , lowerCAmelCase__=18 , lowerCAmelCase__=1_600 , lowerCAmelCase__=1_000 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=0 , lowerCAmelCase__=-1 , lowerCAmelCase__=True , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=True , lowerCAmelCase__="normal" , lowerCAmelCase__=0.01 , lowerCAmelCase__=0.01 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1e-5 , lowerCAmelCase__=0 , **lowerCAmelCase__ , ) -> Any:
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = []
self.cutoffs.extend(lowerCAmelCase__ )
if proj_share_all_but_first:
SCREAMING_SNAKE_CASE = [False] + [True] * len(self.cutoffs )
else:
SCREAMING_SNAKE_CASE = [False] + [False] * len(self.cutoffs )
SCREAMING_SNAKE_CASE = d_model
SCREAMING_SNAKE_CASE = d_embed
SCREAMING_SNAKE_CASE = d_head
SCREAMING_SNAKE_CASE = d_inner
SCREAMING_SNAKE_CASE = div_val
SCREAMING_SNAKE_CASE = pre_lnorm
SCREAMING_SNAKE_CASE = n_layer
SCREAMING_SNAKE_CASE = n_head
SCREAMING_SNAKE_CASE = mem_len
SCREAMING_SNAKE_CASE = same_length
SCREAMING_SNAKE_CASE = attn_type
SCREAMING_SNAKE_CASE = clamp_len
SCREAMING_SNAKE_CASE = sample_softmax
SCREAMING_SNAKE_CASE = adaptive
SCREAMING_SNAKE_CASE = dropout
SCREAMING_SNAKE_CASE = dropatt
SCREAMING_SNAKE_CASE = untie_r
SCREAMING_SNAKE_CASE = init
SCREAMING_SNAKE_CASE = init_range
SCREAMING_SNAKE_CASE = proj_init_std
SCREAMING_SNAKE_CASE = init_std
SCREAMING_SNAKE_CASE = layer_norm_epsilon
super().__init__(eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
    @property
    def max_position_embeddings(self) -> int:
        # Message copied from Transformer-XL documentation
        logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value) -> None:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'The model {self.model_type} is one of the few models that has no sequence length limit.')
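# Note (illustrative, not part of the original file): `cutoffs` partition the
# vocabulary into frequency buckets for the adaptive softmax/embeddings, and
# `div_val` divides the embedding dimension by that factor for each successive
# bucket.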
| 247
| 0
|
def actual_power(a: int, b: int):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
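# Illustrative cross-check (not in the original file): int(b / 2) truncates
# toward zero, so the recursion above also terminates for negative exponents;
# a simple loop can be used to verify results.
def power_iterative(a: float, b: int) -> float:
    result = 1.0
    for _ in range(abs(b)):
        result *= a
    return result if b >= 0 else 1 / result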
if __name__ == "__main__":
print(power(-2, -3))
| 360
|
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text, pattern):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char):
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos):
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self):
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
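# Illustrative convenience wrapper (not in the original file):
def find_all(text: str, pattern: str) -> list[int]:
    return BoyerMooreSearch(text, pattern).bad_character_heuristic()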
| 360
| 1
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
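# Illustrative shape of the utils/tf_ops/onnx.json file consumed above (the
# exact contents are an assumption):
# {"opsets": {"1": ["Abs", "Add", ...], "2": [...], ...}}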
| 401
|
'''simple docstring'''
import math
def perfect_square(num):
    """Check if a number is a perfect square using floating point arithmetic."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n):
    """Check if a number is a perfect square using binary search. Time complexity: O(log n)."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
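# Illustrative examples (not in the original file):
# perfect_square(16) -> True, perfect_square(14) -> False
# perfect_square_binary_search(16) -> True
# perfect_square_binary_search(14) -> False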
| 627
| 0
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
"""simple docstring"""
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = [self.constructed_objects[key_node] for key_node, _ in node.value]
__UpperCAmelCase : str = [tuple(a_ ) if isinstance(a_ , a_ ) else key for key in keys]
__UpperCAmelCase : List[Any] = Counter(a_ )
__UpperCAmelCase : Union[str, Any] = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Optional[int]=False ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = super().construct_mapping(a_ , deep=a_ )
self._check_no_duplicates_on_constructed_node(a_ )
return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    '''simple docstring'''
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("""---""") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
"""simple docstring"""
_FIELDS_WITH_DASHES = {'''train_eval_index'''}  # train-eval-index in the YAML metadata
@classmethod
def lowerCamelCase__ ( cls : Dict , UpperCamelCase : Dict ):
'''simple docstring'''
with open(a_ , encoding="""utf-8""" ) as readme_file:
__UpperCAmelCase : Any = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(a_ )
else:
return cls()
def lowerCamelCase__ ( self : str , UpperCamelCase : Tuple ):
'''simple docstring'''
if path.exists():
with open(a_ , encoding="""utf-8""" ) as readme_file:
__UpperCAmelCase : Optional[Any] = readme_file.read()
else:
__UpperCAmelCase : Any = None
__UpperCAmelCase : List[str] = self._to_readme(a_ )
with open(a_ , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(a_ )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Optional[Any] = None ):
'''simple docstring'''
if readme_content is not None:
__UpperCAmelCase : Optional[int] = _split_yaml_from_readme(a_ )
__UpperCAmelCase : int = "---\n" + self.to_yaml_string() + "---\n" + content
else:
__UpperCAmelCase : Any = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def lowerCamelCase__ ( cls : int , UpperCamelCase : Tuple ):
'''simple docstring'''
__UpperCAmelCase : str = yaml.load(a_ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
__UpperCAmelCase : List[str] = {
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**a_ )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=a_ , allow_unicode=a_ , encoding="""utf-8""" , ).decode("""utf-8""" )
UpperCAmelCase : Tuple = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
UpperCAmelCase : List[Any] = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
UpperCAmelCase : int = ap.parse_args()
UpperCAmelCase : str = Path(args.readme_filepath)
UpperCAmelCase : Tuple = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
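# Illustrative round trip (added; not part of the original script), assuming
# the names reconstructed above:
#
#   metadata = DatasetMetadata.from_yaml_string("license: mit\ntrain-eval-index: []\n")
#   metadata["train_eval_index"]       # dash key converted to the underscore field
#   print(metadata.to_yaml_string())   # dumps the dash form back out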
| 707
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCAmelCase : str = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
UpperCAmelCase : str = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    '''simple docstring'''
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
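# Example (added for illustration): a label file with one class name per line,
#
#   happy
#   sad
#   neutral
#
# yields {0: "happy", 1: "sad", 2: "neutral"}, which is used below as the
# sequence-classification id2label mapping.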
def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
for attribute in key.split(""".""" ):
__UpperCAmelCase : Any = getattr(_UpperCamelCase , _UpperCamelCase )
__UpperCAmelCase : List[str] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_UpperCamelCase ):
__UpperCAmelCase : Optional[int] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__UpperCAmelCase : Tuple = """param"""
if weight_type is not None and weight_type != "param":
__UpperCAmelCase : Dict = getattr(_UpperCamelCase , _UpperCamelCase ).shape
elif weight_type is not None and weight_type == "param":
__UpperCAmelCase : Any = hf_pointer
for attribute in hf_param_name.split(""".""" ):
__UpperCAmelCase : Union[str, Any] = getattr(_UpperCamelCase , _UpperCamelCase )
__UpperCAmelCase : Optional[Any] = shape_pointer.shape
# let's reduce dimension
__UpperCAmelCase : Dict = value[0]
else:
__UpperCAmelCase : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__UpperCAmelCase : int = value
elif weight_type == "weight_g":
__UpperCAmelCase : Optional[int] = value
elif weight_type == "weight_v":
__UpperCAmelCase : int = value
elif weight_type == "bias":
__UpperCAmelCase : Any = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
__UpperCAmelCase : Dict = getattr(_UpperCamelCase , _UpperCamelCase )
__UpperCAmelCase : List[str] = value
else:
__UpperCAmelCase : List[Any] = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : str ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_UpperCamelCase ):
__UpperCAmelCase : Tuple = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__UpperCAmelCase : int = """param"""
if weight_type is not None and weight_type != "param":
__UpperCAmelCase : Optional[int] = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__UpperCAmelCase : Optional[Any] = """.""".join([key, hf_param_name] )
else:
__UpperCAmelCase : List[str] = key
__UpperCAmelCase : Tuple = value if """lm_head""" in full_key else value[0]
UpperCAmelCase : Dict = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : Any=None , _UpperCamelCase : str=None ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : List[Any] = False
for key, mapped_key in MAPPING.items():
__UpperCAmelCase : Optional[Any] = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__UpperCAmelCase : Dict = True
if "*" in mapped_key:
__UpperCAmelCase : str = name.split(_UpperCamelCase )[0].split(""".""" )[-2]
__UpperCAmelCase : Dict = mapped_key.replace("""*""" , _UpperCamelCase )
if "weight_g" in name:
__UpperCAmelCase : List[Any] = """weight_g"""
elif "weight_v" in name:
__UpperCAmelCase : List[str] = """weight_v"""
elif "bias" in name:
__UpperCAmelCase : Any = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCAmelCase : Dict = """weight"""
else:
__UpperCAmelCase : Optional[Any] = None
if hf_dict is not None:
rename_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return is_used
return is_used
def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : str ) -> Any:
'''simple docstring'''
__UpperCAmelCase : str = []
__UpperCAmelCase : Dict = fairseq_model.state_dict()
__UpperCAmelCase : Optional[Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__UpperCAmelCase : Tuple = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , )
__UpperCAmelCase : List[Any] = True
else:
__UpperCAmelCase : Any = load_wavaveca_layer(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] ) -> str:
'''simple docstring'''
__UpperCAmelCase : int = full_name.split("""conv_layers.""" )[-1]
__UpperCAmelCase : Dict = name.split(""".""" )
__UpperCAmelCase : int = int(items[0] )
__UpperCAmelCase : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__UpperCAmelCase : Union[str, Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__UpperCAmelCase : List[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__UpperCAmelCase : int = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__UpperCAmelCase : List[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Dict=False ) -> List[Any]:
'''simple docstring'''
if config_path is not None:
__UpperCAmelCase : List[str] = WavaVecaConfig.from_pretrained(_UpperCamelCase )
else:
__UpperCAmelCase : List[Any] = WavaVecaConfig()
if is_seq_class:
__UpperCAmelCase : List[Any] = read_txt_into_dict(_UpperCamelCase )
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Dict = WavaVecaForSequenceClassification(_UpperCamelCase )
__UpperCAmelCase : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
feature_extractor.save_pretrained(_UpperCamelCase )
elif is_finetuned:
if dict_path:
__UpperCAmelCase : Union[str, Any] = Dictionary.load(_UpperCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__UpperCAmelCase : Optional[Any] = target_dict.pad_index
__UpperCAmelCase : Union[str, Any] = target_dict.bos_index
__UpperCAmelCase : Optional[int] = target_dict.eos_index
__UpperCAmelCase : str = len(target_dict.symbols )
__UpperCAmelCase : List[Any] = os.path.join(_UpperCamelCase , """vocab.json""" )
if not os.path.isdir(_UpperCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_UpperCamelCase ) )
return
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
__UpperCAmelCase : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Union[str, Any] = 1
with open(_UpperCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(_UpperCamelCase , _UpperCamelCase )
__UpperCAmelCase : int = WavaVecaCTCTokenizer(
_UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_UpperCamelCase , )
__UpperCAmelCase : Union[str, Any] = True if config.feat_extract_norm == """layer""" else False
__UpperCAmelCase : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
__UpperCAmelCase : Any = WavaVecaProcessor(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
__UpperCAmelCase : int = WavaVecaForCTC(_UpperCamelCase )
else:
__UpperCAmelCase : str = WavaVecaForPreTraining(_UpperCamelCase )
if is_finetuned or is_seq_class:
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__UpperCAmelCase : Tuple = argparse.Namespace(task="""audio_pretraining""" )
__UpperCAmelCase : Dict = fairseq.tasks.setup_task(_UpperCamelCase )
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_UpperCamelCase )
__UpperCAmelCase : Optional[Any] = model[0].eval()
recursively_load_weights(_UpperCamelCase , _UpperCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
UpperCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
UpperCAmelCase : Union[str, Any] = parser.parse_args()
UpperCAmelCase : Optional[int] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
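# Illustrative invocation (added; the script filename and local paths are
# hypothetical):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned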
| 299
| 0
|
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
SCREAMING_SNAKE_CASE = parser.parse_args()
SCREAMING_SNAKE_CASE = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
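# Illustrative invocation (added; script filename is hypothetical, model
# identifiers are real Hub checkpoints given as examples):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --dest ./rag-new-checkpoint \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base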
| 199
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
_lowerCamelCase : str = parser.parse_args()
_lowerCamelCase : Optional[int] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 184
| 0
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
__A ={
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
__A =[
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
for attribute in key.split("." ):
lowerCamelCase_ = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
lowerCamelCase_ = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
lowerCamelCase_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
lowerCamelCase_ = value
elif weight_type == "weight_g":
lowerCamelCase_ = value
elif weight_type == "weight_v":
lowerCamelCase_ = value
elif weight_type == "bias":
lowerCamelCase_ = value
else:
lowerCamelCase_ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = []
lowerCamelCase_ = fairseq_model.state_dict()
lowerCamelCase_ = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase_ = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == "group" , )
lowerCamelCase_ = True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase_ = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
lowerCamelCase_ = True
if "*" in mapped_key:
lowerCamelCase_ = name.split(lowerCamelCase__ )[0].split("." )[-2]
lowerCamelCase_ = mapped_key.replace("*" , lowerCamelCase__ )
if "weight_g" in name:
lowerCamelCase_ = "weight_g"
elif "weight_v" in name:
lowerCamelCase_ = "weight_v"
elif "bias" in name:
lowerCamelCase_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase_ = "weight"
else:
lowerCamelCase_ = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'Unused weights: {unused_weights}' )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = full_name.split("conv_layers." )[-1]
lowerCamelCase_ = name.split("." )
lowerCamelCase_ = int(items[0] )
lowerCamelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCamelCase__ )
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ):
if config_path is not None:
lowerCamelCase_ = UniSpeechSatConfig.from_pretrained(lowerCamelCase__ )
else:
lowerCamelCase_ = UniSpeechSatConfig()
lowerCamelCase_ = ""
if is_finetuned:
lowerCamelCase_ = UniSpeechSatForCTC(lowerCamelCase__ )
else:
lowerCamelCase_ = UniSpeechSatForPreTraining(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
lowerCamelCase_ = model[0].eval()
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ )
hf_wavavec.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__A =parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 313
|
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def lowerCamelCase_ ( lowerCamelCase__ ):
if "cls_token" in name:
lowerCamelCase_ = name.replace("cls_token" , "vit.embeddings.cls_token" )
if "mask_token" in name:
lowerCamelCase_ = name.replace("mask_token" , "decoder.mask_token" )
if "decoder_pos_embed" in name:
lowerCamelCase_ = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
lowerCamelCase_ = name.replace("pos_embed" , "vit.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace("patch_embed.proj" , "vit.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowerCamelCase_ = name.replace("patch_embed.norm" , "vit.embeddings.norm" )
if "decoder_blocks" in name:
lowerCamelCase_ = name.replace("decoder_blocks" , "decoder.decoder_layers" )
if "blocks" in name:
lowerCamelCase_ = name.replace("blocks" , "vit.encoder.layer" )
if "attn.proj" in name:
lowerCamelCase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase_ = name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
lowerCamelCase_ = name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
lowerCamelCase_ = name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
lowerCamelCase_ = name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name:
lowerCamelCase_ = name.replace("norm.weight" , "vit.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name:
lowerCamelCase_ = name.replace("norm.bias" , "vit.layernorm.bias" )
return name
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
for key in orig_state_dict.copy().keys():
lowerCamelCase_ = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowerCamelCase_ = key.split("." )
lowerCamelCase_ = int(key_split[1] )
if "decoder_blocks" in key:
lowerCamelCase_ = config.decoder_hidden_size
lowerCamelCase_ = "decoder.decoder_layers."
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[dim : dim * 2, :]
lowerCamelCase_ = val[-dim:, :]
elif "bias" in key:
lowerCamelCase_ = val[:dim]
lowerCamelCase_ = val[dim : dim * 2]
lowerCamelCase_ = val[-dim:]
else:
lowerCamelCase_ = config.hidden_size
lowerCamelCase_ = "vit.encoder.layer."
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[dim : dim * 2, :]
lowerCamelCase_ = val[-dim:, :]
elif "bias" in key:
lowerCamelCase_ = val[:dim]
lowerCamelCase_ = val[dim : dim * 2]
lowerCamelCase_ = val[-dim:]
else:
lowerCamelCase_ = val
return orig_state_dict
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = ViTMAEConfig()
if "large" in checkpoint_url:
lowerCamelCase_ = 1_0_2_4
lowerCamelCase_ = 4_0_9_6
lowerCamelCase_ = 2_4
lowerCamelCase_ = 1_6
elif "huge" in checkpoint_url:
lowerCamelCase_ = 1_4
lowerCamelCase_ = 1_2_8_0
lowerCamelCase_ = 5_1_2_0
lowerCamelCase_ = 3_2
lowerCamelCase_ = 1_6
lowerCamelCase_ = ViTMAEForPreTraining(lowerCamelCase__ )
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )["model"]
lowerCamelCase_ = ViTMAEImageProcessor(size=config.image_size )
lowerCamelCase_ = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
model.eval()
lowerCamelCase_ = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
lowerCamelCase_ = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
lowerCamelCase_ = ViTMAEImageProcessor(size=config.image_size )
lowerCamelCase_ = image_processor(images=lowerCamelCase__ , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.logits
if "large" in checkpoint_url:
lowerCamelCase_ = torch.tensor(
[[-0.73_09, -0.71_28, -1.01_69], [-1.01_61, -0.90_58, -1.18_78], [-1.04_78, -0.94_11, -1.19_11]] )
elif "huge" in checkpoint_url:
lowerCamelCase_ = torch.tensor(
[[-1.15_99, -0.91_99, -1.22_21], [-1.19_52, -0.92_69, -1.23_07], [-1.21_43, -0.93_37, -1.22_62]] )
else:
lowerCamelCase_ = torch.tensor(
[[-0.91_92, -0.84_81, -1.12_59], [-1.13_49, -1.00_34, -1.25_99], [-1.17_57, -1.04_29, -1.27_26]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A =parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
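# Illustrative invocation (added; uses the default checkpoint URL from the
# argparse setup above, script filename hypothetical):
#
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base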
| 313
| 1
|
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase_ : Tuple = 3
def primitive_root(p_val: int) -> int:
    print('''Generating primitive root of p''')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print('''Generating prime p...''')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print('''\nWARNING:''')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt', '''w''') as fo:
        fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt', '''w''') as fo:
        fo.write(f'{private_key[0]},{private_key[1]}')


def main() -> None:
    print('''Making key files...''')
    make_key_files('''elgamal''', 2_048)
    print('''Key files generation successful''')
if __name__ == "__main__":
main()
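# Note (added): the keys above satisfy the ElGamal relation
# e_2 * e_1**d == 1 (mod p), so a generated pair can be sanity-checked with:
#
#   public_key, private_key = generate_key(512)
#   key_size, e_1, e_2, p = public_key
#   assert (e_2 * pow(e_1, private_key[1], p)) % p == 1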
| 64
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger(__name__)
def A__ ( snake_case_ : List[Any] ):
SCREAMING_SNAKE_CASE__: str= torch.load(snake_case_ , map_location='''cpu''' )
if "model" in sd.keys():
SCREAMING_SNAKE_CASE__: Any= torch.load(snake_case_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
SCREAMING_SNAKE_CASE__: List[str]= [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(snake_case_ )
SCREAMING_SNAKE_CASE__: str= {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
SCREAMING_SNAKE_CASE__: Union[str, Any]= sd.pop(snake_case_ )
SCREAMING_SNAKE_CASE__: int= list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
SCREAMING_SNAKE_CASE__: int= sd[key]
# We split QKV in separate Q,K,V
SCREAMING_SNAKE_CASE__: Optional[Any]= key.replace('''.qkv_proj.''' , '''.q_proj.''' )
SCREAMING_SNAKE_CASE__: Optional[int]= key.replace('''.qkv_proj.''' , '''.k_proj.''' )
SCREAMING_SNAKE_CASE__: List[str]= key.replace('''.qkv_proj.''' , '''.v_proj.''' )
SCREAMING_SNAKE_CASE__: Optional[int]= value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[str]= torch.split(snake_case_ , depth // 3 , dim=0 )
SCREAMING_SNAKE_CASE__: List[Any]= q
SCREAMING_SNAKE_CASE__: Any= k
SCREAMING_SNAKE_CASE__: Optional[Any]= v
del sd[key]
return sd
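# Illustrative (added): the fused .qkv_proj weight has shape [3 * hidden, hidden],
# and torch.split along dim 0 recovers the three projection blocks in storage
# order, as done above, e.g.
#
#   qkv = torch.randn(3 * 8, 8)
#   q, k, v = torch.split(qkv, 8, dim=0)   # three [8, 8] blocks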
@torch.no_grad()
def A__ ( snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Tuple=None ):
SCREAMING_SNAKE_CASE__: List[str]= load_checkpoint(snake_case_ )
if config is not None:
SCREAMING_SNAKE_CASE__: Any= OPTConfig.from_pretrained(snake_case_ )
else:
SCREAMING_SNAKE_CASE__: Optional[int]= OPTConfig()
SCREAMING_SNAKE_CASE__: Union[str, Any]= OPTModel(snake_case_ ).half().eval()
model.load_state_dict(snake_case_ )
# Check results
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
lowercase_ : int = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 64
| 1
|
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """simple docstring"""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    """simple docstring"""
    measures = {'''time''': time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['''cpu'''] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures):
    """simple docstring"""
    measures = {'''time''': time.time() - start_measures['''time''']}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['''cpu'''] = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20
    measures['''cpu-peak'''] = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures


def log_measures(measures, description):
    """simple docstring"""
    print(F"{description}:")
    print(F"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(F"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[F"{i}-peak"]
        print(F"- GPU {i} peak: {peak:.2f}MiB")
    print(F"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(F"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 720
|
'''simple docstring'''
def _A(nums):
    """simple docstring"""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError('''List is empty''')
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
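# Worked example (added): for [1, 2, 3, 4] the average is 2.5, so the mean
# absolute deviation is (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.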
| 624
| 0
|
"""simple docstring"""
import math
import qiskit
def quantum_full_adder(input_a: int = 1, input_b: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    if isinstance(input_a, str) or isinstance(input_b, str) or isinstance(carry_in, str):
        raise TypeError('inputs must be integers.')
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')
    if (
        (math.floor(input_a) != input_a)
        or (math.floor(input_b) != input_b)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')
    # build registers
    qr = qiskit.QuantumRegister(4, 'qr')
    cr = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
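# Expected behavior (added): for definite inputs, 1 + 1 + 1 = 0b11, so the two
# measured qubits give carry-out 1 and sum 1, and every shot lands on '11',
# i.e. roughly {'11': 1000}.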
| 7
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
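# Usage sketch (added): the class above matches transformers' AltCLIP processor
# (CLIP image processor + XLM-Roberta tokenizer); assuming that mapping and a
# real Hub checkpoint as an example:
#
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")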
| 335
| 0
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
def __init__( self , **lowerCAmelCase__ ):
super().__init__(**lowerCAmelCase__ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs['text_inputs'] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
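# Usage sketch (added): this is the zero-shot image classification pipeline;
# with a real CLIP checkpoint it can be driven as, for example:
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])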
| 476
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 476
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['''next_sentence_label'''] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Tuple=99 , UpperCAmelCase : Any=32 , UpperCAmelCase : str=32 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Tuple=4 , UpperCAmelCase : Any=37 , UpperCAmelCase : List[Any]="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : Optional[Any]=16 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : Tuple=4 , UpperCAmelCase : List[str]=None , ) -> int:
'''simple docstring'''
lowercase : Dict =parent
lowercase : Optional[int] =batch_size
lowercase : Optional[Any] =seq_length
lowercase : Tuple =is_training
lowercase : Dict =use_input_mask
lowercase : Any =use_token_type_ids
lowercase : int =use_labels
lowercase : int =vocab_size
lowercase : Dict =hidden_size
lowercase : Tuple =num_hidden_layers
lowercase : Optional[int] =num_attention_heads
lowercase : Dict =intermediate_size
lowercase : Tuple =hidden_act
lowercase : str =hidden_dropout_prob
lowercase : Optional[Any] =attention_probs_dropout_prob
lowercase : Any =max_position_embeddings
lowercase : List[Any] =type_vocab_size
lowercase : List[str] =type_sequence_label_size
lowercase : int =initializer_range
lowercase : int =num_labels
lowercase : Optional[int] =num_choices
lowercase : int =scope
lowercase : List[str] =embedding_size
    def prepare_config_and_inputs(self):
'''simple docstring'''
lowercase : Any =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : int =None
if self.use_input_mask:
lowercase : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : List[Any] =None
if self.use_token_type_ids:
lowercase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : int =None
lowercase : Optional[Any] =None
lowercase : Optional[Any] =None
if self.use_labels:
lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Dict =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict =MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> List[Any]:
'''simple docstring'''
lowercase : int =TFMobileBertModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : List[Any] =model(UpperCAmelCase )
lowercase : Optional[Any] =[input_ids, input_mask]
lowercase : Union[str, Any] =model(UpperCAmelCase )
lowercase : List[Any] =model(UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A__ ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Dict ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] =TFMobileBertForMaskedLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Any =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =TFMobileBertForNextSentencePrediction(config=UpperCAmelCase )
lowercase : Union[str, Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Optional[int] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(
            result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
        )
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
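
# Hedged usage sketch (not part of the original test suite): a minimal,
# self-contained way to exercise the same checkpoint outside unittest.
# It assumes network access to the Hub; the guard keeps it out of test runs.
if __name__ == "__main__":
    model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
    input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
    prediction_logits = model(input_ids).prediction_logits
    print(prediction_logits.shape)  # (1, 6, 30522)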
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
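
# Hedged usage sketch (not part of the original module): how this image
# processor is typically driven end to end. Assumes PIL, network access, and a
# MobileViT segmentation checkpoint; the local image path is illustrative.
if __name__ == "__main__":
    from PIL import Image
    from transformers import MobileViTForSemanticSegmentation, MobileViTImageProcessor

    image_processor = MobileViTImageProcessor(size={"shortest_edge": 224})
    model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")

    image = Image.open("example.jpg")  # illustrative local file
    inputs = image_processor(images=image, return_tensors="pt")
    outputs = model(**inputs)
    # Upsample the logits back to the original (height, width) and take the argmax per pixel.
    seg_maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])
    print(seg_maps[0].shape)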
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
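
# Hedged usage sketch (not part of the original file): instantiating a tiny
# NllbMoeConfig to show how the sparse-MoE knobs above interact. All values
# here are illustrative, not the 54B checkpoint's.
if __name__ == "__main__":
    tiny_config = NllbMoeConfig(
        vocab_size=256,
        d_model=64,
        encoder_layers=2,
        decoder_layers=2,
        encoder_sparse_step=2,  # every 2nd encoder layer becomes a sparse MoE layer
        decoder_sparse_step=2,
        num_experts=4,
        expert_capacity=8,
    )
    print(tiny_config.num_hidden_layers, tiny_config.hidden_size)  # 2 64 (hidden_size maps to d_model)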
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # Perceiver's tokens are single characters plus special added tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)
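
# Hedged usage sketch (not part of the original test file): Perceiver's
# tokenizer works directly on UTF-8 bytes (offset past the special tokens),
# which is what the integer ids asserted above encode. Requires network access.
if __name__ == "__main__":
    tok = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
    ids = tok("Unicode €.").input_ids
    print(ids)              # byte-level ids wrapped in [CLS] ... [SEP]
    print(tok.decode(ids))  # "[CLS]Unicode €.[SEP]"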
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (stochastic depth) per sample, when applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
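
# Hedged sanity check (not from the original module): in training mode with
# drop_prob=0.5, roughly half the samples are zeroed and the survivors are
# scaled by 1 / keep_prob, so the expected activation is unchanged, e.g.:
#
#     x = torch.ones(1000, 4)
#     y = drop_path(x, drop_prob=0.5, training=True)
#     y.mean()  # ~1.0 in expectation; roughly half the rows are all zeros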
class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with an optional normalization layer."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with 1 group; input is a tensor of shape [B, C, H, W]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtracting the input implements PoolFormer's "pooling minus identity" token mixer.
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)
    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
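
# Hedged usage sketch (not part of the original module): end-to-end image
# classification with the checkpoint named in the docstrings above. Assumes
# PIL and network access; the local image path is illustrative.
if __name__ == "__main__":
    from PIL import Image
    from transformers import PoolFormerImageProcessor

    image_processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
    model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")

    image = Image.open("cat.jpg")  # illustrative local file
    inputs = image_processor(images=image, return_tensors="pt")
    logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])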
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
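
# Hedged worked trace (added for clarity, not in the original): for
# "(5 + ((4 * 2) * (2 + 3)))" the two stacks evolve as follows.
#   push 5, push '+', push 4, push '*', push 2   -> ')' pops '*': 4 * 2 = 8
#   push '*', push 2, push '+', push 3           -> ')' pops '+': 2 + 3 = 5
#   ')' pops '*': 8 * 5 = 40                     -> ')' pops '+': 5 + 40 = 45
# RULE 5 then returns the lone operand, 45. Note the parser handles
# single-digit operands only, since each digit is pushed separately.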
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_rw_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )
    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
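
# Hedged usage sketch (not part of the original test file): greedy generation
# with the small RW checkpoint the tests above rely on. Requires network access.
if __name__ == "__main__":
    from transformers import AutoTokenizer as _AutoTokenizer

    tokenizer = _AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
    model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
    inputs = tokenizer("My favorite food is", return_tensors="pt")
    output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=10)
    print(tokenizer.batch_decode(output_ids)[0])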
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
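# --- Illustrative post-processing sketch (added for this document; not part of the original
# test file). DPT predicts depth at a fixed 384x384 resolution, so a common follow-up step is
# resizing back to the input image and normalising for visualisation. All names are local
# assumptions.
def _depth_to_image_sketch(predicted_depth, image):
    import numpy as np

    resized = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1),  # interpolate expects (batch, channels, H, W)
        size=image.size[::-1],         # PIL size is (width, height)
        mode="bicubic",
        align_corners=False,
    ).squeeze()
    arr = resized.cpu().numpy()
    arr = (arr - arr.min()) / (arr.max() - arr.min()) * 255.0  # scale to 0..255 for display
    return Image.fromarray(arr.astype(np.uint8))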
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
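# --- Usage sketch (added for this document; not part of the original tests). The pipeline
# exposes the three entry points exercised above; steps are reduced here to keep it cheap.
# The prompt is an assumption.
def _versatile_diffusion_sketch():
    pipe = VersatileDiffusionPipeline.from_pretrained(
        "shi-labs/versatile-diffusion", torch_dtype=torch.float16
    ).to("cuda")
    generator = torch.manual_seed(0)
    # text -> image
    image = pipe.text_to_image(
        prompt="a red barn in winter", generator=generator, num_inference_steps=25
    ).images[0]
    # image -> image variation of the generated picture
    variation = pipe.image_variation(image, generator=generator).images[0]
    return image, variation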
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
    """Audio feature to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # If you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
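# --- Usage sketch (added for this document; not part of the upstream module). A column of
# file paths can be cast to this feature, after which indexing decodes to an array plus
# sampling rate. The file path below is an assumption.
def _audio_feature_sketch():
    from datasets import Audio, Dataset

    ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
    ds = ds.cast_column("audio", Audio(sampling_rate=16_000))  # resamples on decode
    example = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}
    return example["array"].shape, example["sampling_rate"]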
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """
    K-Means Clustering using TensorFlow (TF1-style graph/session API).
    'vectors' should be a n*k 2-D NumPy array, where n is the number
    of vectors of dimensionality k. 'noofclusters' should be an integer.
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
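# --- Usage sketch (added for this document): clustering a handful of 2-D points with the
# function above. Assumes a TF1-style API is available, as the implementation itself does.
if __name__ == "__main__":
    sample_vectors = array(
        [[1.0, 1.0], [1.2, 0.8], [0.9, 1.1], [8.0, 8.0], [8.2, 7.9], [7.8, 8.1]]
    )
    centroids, assignments = TFKMeansCluster(sample_vectors, 2)
    print("centroids:", centroids)
    print("assignments:", assignments)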
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
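# --- Usage sketch (added for this document; not part of the upstream module). BlipProcessor
# bundles the image processor and tokenizer defined above; the checkpoint name is an
# assumption.
def _blip_processor_sketch(image):
    from transformers import BlipProcessor

    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    inputs = processor(images=image, text="a photo of", return_tensors="pt")
    # inputs holds pixel_values plus input_ids / attention_mask from the tokenizer
    return inputs.keys()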
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
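# --- Usage sketch (added for this document): the defaults above describe a 6-layer
# encoder/decoder; any field can be overridden at construction time.
if __name__ == "__main__":
    config = BertAbsConfig(vocab_size=30522, dec_layers=8, dec_dropout=0.1)
    print(config.dec_layers, config.enc_hidden_size)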
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond,
                attention_mask, cross_attention_kwargs, guess_mode, return_dict,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function,
                safe_serialization=safe_serialization, variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )
        return cls(controlnets)
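# --- Usage sketch (added for this document; checkpoint names are assumptions): two single
# ControlNets can be wrapped into the multi-net container defined above and passed wherever
# a single ControlNet is expected.
def _multi_controlnet_sketch():
    canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
    depth = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth")
    return MultiControlNetModel([canny, depth])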
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Union[str, Any] = ["MobileViTFeatureExtractor"]
lowercase__ : List[Any] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
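# --- Usage sketch (added for this document; not part of the upstream module). A one-line
# demo of the public entry points re-exported above; the dataset name is an assumption.
def _quickstart_sketch():
    ds = load_dataset("glue", "mrpc", split="train")
    ds = ds.map(lambda example: {"n_chars": len(example["sentence1"])})
    return ds.features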
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1_000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        feature = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=feature)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)
    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)
    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))
        shard_count += 1
        total_records += records_containing
    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
UpperCAmelCase_ = parse_args()
main(args)
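# --- Read-back sketch (added for this document; not part of the original script): parsing
# one shard written above with tf.data. The feature spec mirrors get_serialized_examples(),
# and assumes every record was written at exactly `max_length` tokens.
def _read_shard_sketch(tfrecord_path, max_length=512):
    feature_spec = {
        "input_ids": tf.io.FixedLenFeature([max_length], dtype=tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], dtype=tf.int64),
    }
    ds = tf.data.TFRecordDataset([tfrecord_path])
    ds = ds.map(lambda record: tf.io.parse_single_example(record, feature_spec))
    return next(iter(ds.take(1)))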
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
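# --- Usage sketch (added for this document): constructing the config and inspecting the
# ONNX input spec defined above.
if __name__ == "__main__":
    cfg = Data2VecVisionConfig(image_size=224, patch_size=16)
    print(cfg.hidden_size, cfg.out_indices)
    print(Data2VecVisionOnnxConfig.torch_onnx_minimum_version)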
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features

logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
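# --- Usage sketch (added for this document; the data directory and model type are
# assumptions): wiring the dataset defined above into a PyTorch DataLoader.
def _squad_dataloader_sketch(tokenizer):
    from torch.utils.data import DataLoader

    args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data")
    dataset = SquadDataset(args, tokenizer, mode=Split.train)
    return DataLoader(dataset, batch_size=8, shuffle=True)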
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
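# A minimal round-trip sketch of the reader/writer exercised above (the column
# values are illustrative; `work_dir` is any pathlib.Path the caller controls):
def _parquet_roundtrip_demo(work_dir):
    data = {"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]}
    dataset = Dataset.from_dict(data)
    ParquetDatasetWriter(dataset, work_dir / "demo.parquet").write()
    # Reading back returns a Dataset with the same columns and dtypes.
    return ParquetDatasetReader(str(work_dir / "demo.parquet"), cache_dir=str(work_dir / "cache")).read()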
| 59
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__magic_name__ : Optional[int] = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
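    # Sketch of the same pipeline outside the test harness (the checkpoint id
    # comes from the test above; device and step count are illustrative):
    # pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion").to("cuda")
    # variation = pipe(image=some_pil_image, num_inference_steps=25).images[0]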
| 672
| 0
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
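    # Hedged usage sketch for the pipeline above ("google/ncsnpp-church-256" is
    # an assumed hub checkpoint pairing a UNet with a ScoreSdeVeScheduler):
    # pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
    # image = pipe(num_inference_steps=2000).images[0]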
| 715
|
'''simple docstring'''
def twos_complement(number: int) -> str:
    """
    Take in a negative integer 'number'.
    Return the two's complement representation of 'number'.
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
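    # Illustrative checks (widths are minimal: sign bit plus magnitude bits):
    print(twos_complement(-5))  # 0b1011
    print(twos_complement(-1))  # 0b11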
| 178
| 0
|
'''simple docstring'''
import re
def dna(dna: str) -> str:
    """Return the complementary strand of a DNA sequence (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
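    # Illustrative check: each base maps to its Watson-Crick complement.
    print(dna("ATCG"))  # TAGC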
| 71
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
snake_case : int = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
snake_case : List[str] = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
snake_case : Tuple = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
snake_case : str = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
snake_case : Any = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
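# Quick numeric check of the estimator above: with n=2 generated samples of
# which c=1 passes, pass@1 = 1 - C(1, 1) / C(2, 1) = 0.5 and pass@2 = 1.0.
# estimate_pass_at_k([2], [1], 1) -> array([0.5])
# estimate_pass_at_k([2], [1], 2) -> array([1.])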
| 124
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
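# Illustrative behaviour of the helper above (synthetic input): an 80_000-sample
# waveform clipped to at most 1 second at 16 kHz keeps exactly 16_000 samples.
# random_subsample(np.zeros(80_000), max_length=1.0).shape -> (16000,)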
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
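# Illustrative CLI invocation (the dataset and model ids are assumptions, not
# taken from this file):
# python run_audio_classification.py \
#     --model_name_or_path facebook/wav2vec2-base \
#     --dataset_name superb --dataset_config_name ks \
#     --output_dir ./wav2vec2-base-keyword-spotting \
#     --do_train --do_eval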
| 341
| 0
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class TaTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" T5 tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
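    # Hedged usage sketch ("t5-small" appears in the pretrained maps above; the
    # sample sentence is illustrative):
    # tokenizer = TaTokenizerFast.from_pretrained("t5-small")
    # ids = tokenizer("translate English to German: Hello.", return_tensors="pt").input_ids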
| 481
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
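# For intuition, the rule exercised above hands out contiguous, near-equal
# ranges of shards (illustrative call):
# _distribute_shards(num_shards=10, max_num_jobs=3) -> [range(0, 4), range(4, 7), range(7, 10)]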
| 481
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 442
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 442
| 1
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a 2D Gabor kernel; the kernel size is forced to be odd."""
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel: g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 568
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 178
| 0
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 711
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
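    # Note on units: the closed form is v_rms = sqrt(3 * R * T / M) with M in
    # kg/mol, so the call below treats 28 as kg/mol (value kept from the file).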
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """
    Treats curve as a collection of linear lines and sums the area of the
    trapezium shape they form.
    """
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
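    # As the step count grows the printed values converge: each pass adds one
    # trapezoid of area |f(x1) + f(x2)| * (x2 - x1) / 2.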
| 51
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
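# Hedged sanity check of the sparse-step bookkeeping above (all defaults):
# SwitchTransformersConfig().encoder_sparse_step -> 4   (12 layers / 3 sparse layers)
# SwitchTransformersConfig().num_experts -> 8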
| 404
|
"""simple docstring"""
def solution() -> str:
    """Returns the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
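# This is Project Euler problem 48; Python's arbitrary-precision integers make
# the direct sum exact, and the expected output is "9110846700".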
if __name__ == "__main__":
print(solution())
| 404
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {}
class LlamaConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'llama'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__( self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation( self ):
        '''simple docstring'''
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
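# Hypothetical usage sketch (commented out; illustrates the schema the
# validation above accepts):
# config = LlamaConfig(rope_scaling={'type': 'dynamic', 'factor': 2.0})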
| 42
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
        'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AutoformerForPrediction',
        'AutoformerModel',
        'AutoformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
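# Hedged note on the lazy-import pattern above (comment-only sketch; assumes
# this file is transformers/models/autoformer/__init__.py): binding a
# _LazyModule into sys.modules defers the torch-backed imports until first
# attribute access, e.g.:
# from transformers.models import autoformer
# cfg = autoformer.AutoformerConfig()  # the real module is imported only here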
| 1
| 0
|
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
"""simple docstring"""
    model_class = AutoencoderKL
    main_input_name = """sample"""
    base_precision = 1e-2
@property
    def dummy_input( self ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
@property
    def input_shape( self ):
        '''simple docstring'''
        return (3, 32, 32)
@property
    def output_shape( self ):
        '''simple docstring'''
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self ):
        '''simple docstring'''
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature( self ):
'''simple docstring'''
pass
    def test_training( self ):
'''simple docstring'''
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
    def test_gradient_checkpointing( self ):
        '''simple docstring'''
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        model.to(torch_device )
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out )
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_a = self.model_class(**init_dict )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(torch_device )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        out_a = model_a(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        loss_a = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1e-5 )
        named_params = dict(model.named_parameters() )
        named_params_a = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
    def test_from_pretrained_hub( self ):
        '''simple docstring'''
        model , loading_info = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained( self ):
        '''simple docstring'''
        model = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
        model = model.to(torch_device )
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0 )
        else:
            generator = torch.Generator(device=torch_device ).manual_seed(0 )
        image = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image , sample_posterior=True , generator=generator ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ] )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
@slow
class AutoencoderKLIntegrationTests( unittest.TestCase ):
"""simple docstring"""
    def get_file_format( self , seed , shape ):
        '''simple docstring'''
        return F"""gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"""

    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image( self , seed=0 , shape=(4, 3, 512, 512) , fp16=False ):
        '''simple docstring'''
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image

    def get_sd_vae_model( self , model_id="CompVis/stable-diffusion-v1-4" , fp16=False ):
        '''simple docstring'''
        revision = 'fp16' if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id , subfolder='vae' , torch_dtype=torch_dtype , revision=revision , )
        model.to(torch_device ).eval()
        return model

    def get_generator( self , seed=0 ):
        '''simple docstring'''
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[4_7, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion( self , seed , expected_slice , expected_slice_mps ):
        '''simple docstring'''
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[4_7, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16( self , seed , expected_slice ):
        '''simple docstring'''
        model = self.get_sd_vae_model(fp16=True )
        image = self.get_sd_image(seed , fp16=True )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[4_7, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion_mode( self , seed , expected_slice , expected_slice_mps ):
        '''simple docstring'''
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        with torch.no_grad():
            sample = model(image ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[1_3, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[3_7, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode( self , seed , expected_slice ):
        '''simple docstring'''
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[2_7, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[1_6, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16( self , seed , expected_slice ):
        '''simple docstring'''
        model = self.get_sd_vae_model(fp16=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fp16=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=5e-3 )
@parameterized.expand([(1_3,), (1_6,), (2_7,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16( self , seed ):
        '''simple docstring'''
        model = self.get_sd_vae_model(fp16=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fp16=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_a , atol=1e-1 )
@parameterized.expand([(1_3,), (1_6,), (3_7,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0( self , seed ):
        '''simple docstring'''
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[3_3, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[4_7, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample( self , seed , expected_slice ):
        '''simple docstring'''
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            dist = model.encode(image ).latent_dist
            sample = dist.sample(generator=generator )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        tolerance = 3e-3 if torch_device != 'mps' else 1e-2
        assert torch_all_close(output_slice , expected_output_slice , atol=tolerance )
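# Hedged geometry check of the shapes asserted in the tests above: the SD VAE
# maps a (B, 3, 512, 512) image to a (B, 4, 64, 64) latent, an 8x downsample.
batch, height, width = 4, 512, 512
assert (batch, 4, height // 8, width // 8) == (4, 4, 64, 64)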
| 502
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester :
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config( self ):
        '''simple docstring'''
        return GPTNeoXConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )

    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model( self , config , input_ids , input_mask ):
        '''simple docstring'''
        model = GPTNeoXModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_model_as_decoder( self , config , input_ids , input_mask ):
        '''simple docstring'''
        config.add_cross_attention = True
        model = GPTNeoXModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels ):
        '''simple docstring'''
        model = GPTNeoXForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_question_answering( self , config , input_ids , input_mask , token_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_for_sequence_classification( self , config , input_ids , input_mask , token_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification( self , config , input_ids , input_mask , token_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask ):
        '''simple docstring'''
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(input_ids , attention_mask=input_mask , use_cache=True )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask , output_hidden_states=True )
        output_from_no_past = output_from_no_past['hidden_states'][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3 ) )

    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """feature-extraction""": GPTNeoXModel,
            """question-answering""": GPTNeoXForQuestionAnswering,
            """text-classification""": GPTNeoXForSequenceClassification,
            """text-generation""": GPTNeoXForCausalLM,
            """token-classification""": GPTNeoXForTokenClassification,
            """zero-shot""": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp( self ):
        '''simple docstring'''
        self.model_tester = GPTNeoXModelTester(self )
        self.config_tester = ConfigTester(self , config_class=GPTNeoXConfig , hidden_size=64 , num_attention_heads=8 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , input_mask )

    def test_model_as_decoder( self ):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )

    def test_model_as_decoder_with_default_input_mask( self ):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )

    def test_decoder_model_past_large_inputs( self ):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config , input_ids , input_mask )

    def test_model_for_causal_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )

    def test_model_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_model_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_model_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @unittest.skip(reason='Feed forward chunking is not implemented' )
    def test_feed_forward_chunking( self ):
        '''simple docstring'''
        pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def test_model_rope_scaling( self , scaling_type ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = GPTNeoXModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_lm_generate_gptneox( self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device )
            inputs = tokenizer('My favorite food is' , return_tensors='pt' ).to(torch_device )
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
            output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=20 )
            output_str = tokenizer.batch_decode(output_ids )[0]
            self.assertEqual(output_str , expected_output )
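# Hedged illustration of the rope_scaling schema exercised by the scaling test
# above: both styles are plain dicts with a `type` and a `factor` field.
linear_scaling = {'type': 'linear', 'factor': 10.0}
dynamic_scaling = {'type': 'dynamic', 'factor': 10.0}
assert set(linear_scaling) == set(dynamic_scaling) == {'type', 'factor'}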
| 502
| 1
|
import math
import qiskit
def quantum_full_adder( input_a = 1 , input_b = 1 , carry_in = 1 ):
    """simple docstring"""
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError("inputs must be integers." )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive." )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError("inputs must be exact integers." )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2." )
    # build registers
    qr = qiskit.QuantumRegister(4 , "qr" )
    cr = qiskit.ClassicalRegister(2 , "cr" )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator" )
    job = qiskit.execute(quantum_circuit , backend , shots=1000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 333
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig( PretrainedConfig ):
    model_type = 'beit'

    def __init__( self , vocab_size=8192 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs( self ):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation( self ):
        return 1e-4
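# Hedged arithmetic check of the defaults above (added for illustration): a
# 224x224 input split into 16x16 patches gives a 14x14 grid, i.e. 196 tokens.
image_size, patch_size = 224, 16
assert (image_size // patch_size) ** 2 == 196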
| 333
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowercase = logging.get_logger(__name__)
class ConvNextImageProcessor( BaseImageProcessor ):
    model_input_names = ["pixel_values"]

    def __init__( self , do_resize = True , size = None , crop_pct = None , resample = PILImageResampling.BILINEAR , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 384}
        size = get_size_dict(size , default_to_square=False )
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , crop_pct , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
        shortest_edge = size['shortest_edge']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
            image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , crop_pct = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
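# Hedged numeric sketch of the crop_pct rule implemented above: for
# shortest_edge < 384 the image is first resized to shortest_edge / crop_pct,
# then center-cropped back down to shortest_edge.
shortest_edge, crop_pct = 224, 224 / 256
assert int(shortest_edge / crop_pct) == 256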
| 431
|
# Function to print upper half of diamond (pyramid)
def floyd( n ):
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(' ' , end='' )
        for _ in range(0 , i + 1 ):  # printing stars
            print('* ' , end='' )
        print()


def reverse_floyd( n ):
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print('* ' , end='' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(' ' , end='' )


def pretty_print( n ):
    if n <= 0:
        print(' ... .... nothing printing :(' )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half
if __name__ == "__main__":
print(R"""| /\ | |- | |- |--| |\ /| |-""")
print(R"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 431
| 1
|
def rank_of_matrix( matrix ):
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
                # Reduce the row pointer by one to stay on the same row
                row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
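# Hedged usage sketch (illustrative input): the second row is a multiple of
# the first, so Gaussian elimination should report rank 1.
assert rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]) == 1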
| 114
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """vit_msn"""

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 114
| 1
|
'''simple docstring'''
import os
def solution() -> int:
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , 'triangle.txt' )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' ' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number_a = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a , number_b )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
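# Hedged mini-example (added for illustration) of the same bottom-up DP on a
# three-row triangle; the best path 3 -> 7 -> 4 sums to 14.
tri = [[3], [7, 4], [2, 4, 6]]
for i in range(1, len(tri)):
    for j in range(len(tri[i])):
        right = tri[i - 1][j] if j != len(tri[i - 1]) else 0
        left = tri[i - 1][j - 1] if j > 0 else 0
        tri[i][j] += max(right, left)
assert max(tri[-1]) == 14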
| 318
|
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples( tok , src_examples , tgt_examples , max_tokens=1024 ):
    """simple docstring"""
    finished_src , finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src , new_tgt = sorted_examples[0]

    def is_too_big( text ):
        return tok(text , return_tensors='pt' ).input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + ' ' + src
        cand_tgt = new_tgt + ' ' + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ):  # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src , new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src , new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
def pack_data_dir( tok , data_dir , max_tokens , save_path ):
    """simple docstring"""
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path , tgt_path = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src , packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(F'''packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.''' )
        Path(save_path / F'''{split}.source''' ).open('w' ).write('\n'.join(packed_src ) )
        Path(save_path / F'''{split}.target''' ).open('w' ).write('\n'.join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path , tgt_path = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
        shutil.copyfile(src_path , save_path / F'''{split}.source''' )
        shutil.copyfile(tgt_path , save_path / F'''{split}.target''' )
def packer_cli():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name' , type=str , help='like facebook/bart-large-cnn,t5-base, etc.' )
    parser.add_argument('--max_seq_len' , type=int , default=128 )
    parser.add_argument('--data_dir' , type=str )
    parser.add_argument('--save_path' , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
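# Hedged CLI usage sketch (flag names taken from the argparse setup above;
# paths are placeholders):
# python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 128 \
#     --data_dir ./cnn_dm --save_path ./cnn_dm_packed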
| 318
| 1
|
from math import sqrt
def is_prime( number ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution( nth = 10001 ) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''')
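# Hedged cross-check (commented out; assumes sympy is installed):
# from sympy import prime
# assert solution(6) == prime(6) == 13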
| 579
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = {'vocab_file': 'spm_char.model'}
UpperCAmelCase_ : int = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
UpperCAmelCase_ : List[Any] = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class _lowerCamelCase ( snake_case_ ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs = None , **kwargs , ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        """simple docstring"""
        return self.sp_model.get_piece_size()

    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _tokenize( self , text ):
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token( self , index ):
        """simple docstring"""
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        """simple docstring"""
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]

    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        suffix_ones = [1]
        if token_ids_b is None:
            return ([0] * len(token_ids_a )) + suffix_ones
        return ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
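# Hypothetical usage sketch (class name from the listing above; checkpoint name
# from the pretrained map):
# tok = _lowerCamelCase.from_pretrained('microsoft/speecht5_asr')
# ids = tok('hello world').input_ids  # char-level SentencePiece ids + </s>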
| 365
| 0
|
'''simple docstring'''
import os
def solution():
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('"' , '' ).split(',' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
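# Worked example from the problem statement (added for illustration): COLIN
# scores 3 + 15 + 12 + 9 + 14 = 53 and, at position 938, contributes
# 938 * 53 = 49714 to the total.
assert sum(ord(letter) - 64 for letter in 'COLIN') == 53
assert 938 * 53 == 49714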
| 490
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class __lowercase ( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed( self ):
        return 12

    @property
    def num_embeds_ada_norm( self ):
        return 12

    @property
    def text_embedder_hidden_size( self ):
        return 32

    @property
    def dummy_vqvae( self ):
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model

    @property
    def dummy_tokenizer( self ):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer

    @property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )

    @property
    def dummy_transformer( self ):
        torch.manual_seed(0 )
        height = 12
        width = 12
        model_kwargs = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }
        model = Transformer2DModel(**model_kwargs )
        return model
    def test_vq_diffusion( self ):
        device = 'cpu'
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'teddy bear playing in the pool'
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type='np' )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type='np' , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling( self ):
        device = 'cpu'
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'teddy bear playing in the pool'
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type='np' )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type='np' , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling( self ):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
        pipeline = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        output = pipeline(
            'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
| 490
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 1_2_8, "min_length": 1_2, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 1_4_2, "min_length": 5_6, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 6_2, "min_length": 1_1, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 1_2_8,
            "task_specific_params.summarization.min_length": 1_2,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 1_4_2,
            "task_specific_params.summarization_cnn.min_length": 5_6,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 6_2,
            "task_specific_params.summarization_xsum.min_length": 1_1,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict) , expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3 , 4)
        self.assertTrue(np.allclose(transpose(x) , x.transpose()))
        x = np.random.randn(3 , 4 , 5)
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0)) , x.transpose((1, 2, 0))))
    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3 , 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x) , transpose(t).numpy()))
        x = np.random.randn(3 , 4 , 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0)) , transpose(t , axes=(1, 2, 0)).numpy()))
    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3 , 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x) , transpose(t).numpy()))
        x = np.random.randn(3 , 4 , 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0)) , transpose(t , axes=(1, 2, 0)).numpy()))
    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3 , 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x) , np.asarray(transpose(t))))
        x = np.random.randn(3 , 4 , 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0)) , np.asarray(transpose(t , axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        x = np.random.randn(3 , 4)
        self.assertTrue(np.allclose(reshape(x , (4, 3)) , np.reshape(x , (4, 3))))
        x = np.random.randn(3 , 4 , 5)
        self.assertTrue(np.allclose(reshape(x , (1_2, 5)) , np.reshape(x , (1_2, 5))))
    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3 , 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x , (4, 3)) , reshape(t , (4, 3)).numpy()))
        x = np.random.randn(3 , 4 , 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x , (1_2, 5)) , reshape(t , (1_2, 5)).numpy()))
    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3 , 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x , (4, 3)) , reshape(t , (4, 3)).numpy()))
        x = np.random.randn(3 , 4 , 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x , (1_2, 5)) , reshape(t , (1_2, 5)).numpy()))
    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3 , 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x , (4, 3)) , np.asarray(reshape(t , (4, 3)))))
        x = np.random.randn(3 , 4 , 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x , (1_2, 5)) , np.asarray(reshape(t , (1_2, 5)))))
    def test_squeeze_numpy(self):
        x = np.random.randn(1 , 3 , 4)
        self.assertTrue(np.allclose(squeeze(x) , np.squeeze(x)))
        x = np.random.randn(1 , 4 , 1 , 5)
        self.assertTrue(np.allclose(squeeze(x , axis=2) , np.squeeze(x , axis=2)))
    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1 , 3 , 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x) , squeeze(t).numpy()))
        x = np.random.randn(1 , 4 , 1 , 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x , axis=2) , squeeze(t , axis=2).numpy()))
    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1 , 3 , 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x) , squeeze(t).numpy()))
        x = np.random.randn(1 , 4 , 1 , 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x , axis=2) , squeeze(t , axis=2).numpy()))
    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1 , 3 , 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x) , np.asarray(squeeze(t))))
        x = np.random.randn(1 , 4 , 1 , 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x , axis=2) , np.asarray(squeeze(t , axis=2))))
    def test_expand_dims_numpy(self):
        x = np.random.randn(3 , 4)
        self.assertTrue(np.allclose(expand_dims(x , axis=1) , np.expand_dims(x , axis=1)))
    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3 , 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x , axis=1) , expand_dims(t , axis=1).numpy()))
    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3 , 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x , axis=1) , expand_dims(t , axis=1).numpy()))
    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3 , 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x , axis=1) , np.asarray(expand_dims(t , axis=1))))
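# Minimal sketch (illustrative only) of a dict flattener that produces the
# dot-joined keys asserted in test_flatten_dict above; the real
# transformers.utils implementation may differ in signature and edge cases.
def flatten_dict_sketch(d, parent_key="", delimiter="."):
    items = {}
    for key, value in d.items():
        new_key = f"{parent_key}{delimiter}{key}" if parent_key else str(key)
        if isinstance(value, dict):
            # recurse into nested dicts, extending the key path
            items.update(flatten_dict_sketch(value, new_key, delimiter))
        else:
            items[new_key] = value
    return items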
| 34
|
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    max_seq_length: Optional[int] = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    pad_to_max_length: bool = field(
        default=True , metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        } , )
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: str = field(
        default=None , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    language: str = field(
        default=None , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
    train_language: Optional[str] = field(
        default=None , metadata={"help": "Train language if it is different from the evaluation language."} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    do_lower_case: Optional[bool] = field(
        default=False , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_xnli" ,lowercase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli" ,model_args.language ,split="train" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
        else:
            train_dataset = load_dataset(
                "xnli" ,model_args.train_language ,split="train" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
        label_list = train_dataset.features["label"].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli" ,model_args.language ,split="validation" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
        label_list = eval_dataset.features["label"].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli" ,model_args.language ,split="test" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
        label_list = predict_dataset.features["label"].names
    # Labels
    num_labels = len(label_list )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=num_labels ,id2label={str(i ): label for i, label in enumerate(label_list )} ,label2id={label: i for i, label in enumerate(label_list )} ,finetuning_task="xnli" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,do_lower_case=model_args.do_lower_case ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=config ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples ):
        # Tokenize the texts
        return tokenizer(
            examples["premise"] ,examples["hypothesis"] ,padding=padding ,max_length=data_args.max_seq_length ,truncation=True ,)
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) ,data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="train dataset map pre-processing" ):
            train_dataset = train_dataset.map(
                preprocess_function ,batched=True ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on train dataset" ,)
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset ) ) ,3 ):
            logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) ,data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="validation dataset map pre-processing" ):
            eval_dataset = eval_dataset.map(
                preprocess_function ,batched=True ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on validation dataset" ,)
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset ) ,data_args.max_predict_samples )
            predict_dataset = predict_dataset.select(range(max_predict_samples ) )
        with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
            predict_dataset = predict_dataset.map(
                preprocess_function ,batched=True ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on prediction dataset" ,)
# Get the metric function
    metric = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        preds = p.predictions[0] if isinstance(p.predictions ,tuple ) else p.predictions
        preds = np.argmax(preds ,axis=1 )
        return metric.compute(predictions=preds ,references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer ,pad_to_multiple_of=8 )
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model ,args=training_args ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=compute_metrics ,tokenizer=tokenizer ,data_collator=data_collator ,)
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples ,len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train" ,metrics )
        trainer.save_metrics("train" ,metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples ,len(eval_dataset ) )
        trainer.log_metrics("eval" ,metrics )
        trainer.save_metrics("eval" ,metrics )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
        predictions, labels, metrics = trainer.predict(predict_dataset ,metric_key_prefix="predict" )
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset )
        )
        metrics["predict_samples"] = min(max_predict_samples ,len(predict_dataset ) )
        trainer.log_metrics("predict" ,metrics )
        trainer.save_metrics("predict" ,metrics )
        predictions = np.argmax(predictions ,axis=1 )
        output_predict_file = os.path.join(training_args.output_dir ,"predictions.txt" )
        if trainer.is_world_process_zero():
            with open(output_predict_file ,"w" ) as writer:
                writer.write("index\tprediction\n" )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
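# Illustrative invocation (paths and hyper-parameters are placeholders, not
# prescribed values; all flags map to the dataclasses/TrainingArguments above):
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en \
#     --do_train --do_eval \
#     --max_seq_length 128 \
#     --output_dir /tmp/debug_xnli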
| 624
| 0
|
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
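# Usage sketch (illustrative): a deterministic 2x3 batch can be drawn by passing
# an explicitly seeded generator instead of relying on the module-level rng:
#   sample = floats_list((2, 3), scale=2.0, rng=random.Random(0))
#   # -> 2 inner lists of 3 floats, each in [0.0, 2.0)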
class TvltFeatureExtractionTester ( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , spectrogram_length=2048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=4_4100 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self ):
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        """simple docstring"""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    """simple docstring"""
    feature_extraction_class = TvltFeatureExtractor
    def setUp( self ):
        """simple docstring"""
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
    def test_feat_extract_properties( self ):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor , "spectrogram_length" ) )
        self.assertTrue(hasattr(feature_extractor , "feature_size" ) )
        self.assertTrue(hasattr(feature_extractor , "num_audio_channels" ) )
        self.assertTrue(hasattr(feature_extractor , "hop_length" ) )
        self.assertTrue(hasattr(feature_extractor , "chunk_length" ) )
        self.assertTrue(hasattr(feature_extractor , "sampling_rate" ) )
    def test_feat_extract_from_and_save_pretrained( self ):
        """simple docstring"""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters" )
        mel_second = dict_second.pop("mel_filters" )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ):
        """simple docstring"""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters" )
        mel_second = dict_second.pop("mel_filters" )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_4100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels )
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="np" , sampling_rate=4_4100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels )
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors="np" , sampling_rate=4_4100 , mask_audio=True ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="np" , sampling_rate=4_4100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels )
    def _load_datasamples( self , num_samples ):
        """simple docstring"""
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        """simple docstring"""
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors="pt" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
        expected_slice = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1e-4 ) )
| 712
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput ( ModelOutput ):
    """simple docstring"""
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig ( XLMRobertaConfig ):
    """simple docstring"""
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation ( RobertaPreTrainedModel ):
    """simple docstring"""
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig
    def __init__( self , config ):
        """simple docstring"""
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , "has_pre_transformation" , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
    def forward( self , input_ids = None , attention_mask = None , token_type_ids = None , position_ids = None , head_mask = None , inputs_embeds = None , encoder_hidden_states = None , encoder_attention_mask = None , output_attentions = None , output_hidden_states = None , return_dict = None , ):
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output )
            projection_state = self.transformation_pre(sequence_output )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
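# Minimal usage sketch (illustrative, not part of the module): with the default
# XLM-Roberta hidden size and project_dim=32, `projection_state` has shape
# (batch, seq_len, 32).
#   config = RobertaSeriesConfig(project_dim=32)
#   model = RobertaSeriesModelWithTransformation(config)
#   out = model(input_ids=torch.tensor([[0, 5, 2]]))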
| 445
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def preprocess( image ):
    '''simple docstring'''
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    image = [trans(img.convert('RGB' ) ) for img in image]
    image = torch.stack(image )
    return image
class DDIMNoiseComparativeAnalysisPipeline ( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , unet , scheduler ):
        """simple docstring"""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    def check_inputs( self , strength ):
        """simple docstring"""
        if strength < 0 or strength > 1:
            raise ValueError(f"""The value of strength should in [0.0, 1.0] but is {strength}""" )
    def get_timesteps( self , num_inference_steps , strength , device ):
        """simple docstring"""
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents( self , image , timestep , batch_size , dtype , device , generator=None ):
        """simple docstring"""
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}""" )
        init_latents = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        print('add noise to latents at timestep' , timestep )
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
@torch.no_grad()
    def __call__( self , image: Union[torch.FloatTensor, PIL.Image.Image] = None , strength: float = 0.8 , batch_size: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta: float = 0.0 , num_inference_steps: int = 50 , use_clipped_model_output: Optional[bool] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        self.check_inputs(strength )
        # 2. Preprocess image
        image = preprocess(image )
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # 4. Prepare latent variables
        latents = self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image )
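# Illustrative arithmetic for get_timesteps above: with num_inference_steps=50
# and strength=0.8, init_timestep = min(int(50 * 0.8), 50) = 40 and
# t_start = max(50 - 40, 0) = 10, so denoising runs over timesteps[10:] --
# 40 of the 50 scheduler steps, starting from a partially noised latent.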
| 265
|
"""simple docstring"""
def power( base : int , exponent : int ) -> float:
    '''simple docstring'''
    return base * power(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f'''{base} to the power of {exponent} is {result}''')
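# Worked example: power(3, 4) unwinds as 3 * power(3, 3) -> 3 * 3 * power(3, 2)
# -> ... -> 3 * 3 * 3 * 3 * 1 = 81. The recursion only handles non-negative
# exponents; the `exponent < 0` branch above compensates by taking
# 1 / power(base, abs(exponent)).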
| 265
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
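    # Illustrative effect of the lazy registration above: a client import such as
    #   from transformers.models.convbert import ConvBertModel
    # only triggers the real `modeling_convbert` import on first attribute
    # access; until then just the _import_structure mapping is held in memory.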
| 599
|
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
UpperCamelCase_ = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
    tokenizer = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
UpperCamelCase_ = tokenizer(["Making tiny model"], return_tensors="pt")
UpperCamelCase_ = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 599
| 1
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester :
    """simple docstring"""
    def __init__( self , parent , batch_size=1_3 , image_size=3_2 , num_channels=3 , num_stages=4 , hidden_sizes=[1_0, 2_0, 3_0, 4_0] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=3_7 , hidden_act="gelu" , type_sequence_label_size=1_0 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config( self ):
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config( self ):
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=2_5_5 , num_labels=self.num_labels , )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        model = UperNetForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = UperNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    @unittest.skip(reason='UperNet does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason='UperNet does not support input and output embeddings' )
    def test_model_common_attributes( self ):
        pass
    @unittest.skip(reason='UperNet does not have a base model' )
    def test_save_load_fast_init_from_base( self ):
        pass
    @unittest.skip(reason='UperNet does not have a base model' )
    def test_save_load_fast_init_to_base( self ):
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
    def test_multi_gpu_data_parallel_forward( self ):
        pass
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small( self ):
        pass
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_initialization( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of ADE20k
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
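# --- Illustrative sketch, not part of the original test file ---
# The same inference flow as the integration tests above, runnable as a script.
# The checkpoint name and the 512x512 logit shape are taken from the tests;
# everything else is standard transformers usage.
if __name__ == "__main__":
    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (batch, num_labels, 512, 512)
    print(logits.argmax(dim=1).shape)  # per-pixel class predictions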
| 53
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
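# --- Illustrative sketch, not part of the original test file ---
# Minimal end-to-end use of the processor built in these tests: one call
# tokenizes the text and preprocesses the image, returning a single batch dict.
# The tokenizer/image-processor pair mirrors setUp(); nothing else is assumed.
#
#   processor = VisionTextDualEncoderProcessor(
#       tokenizer=BertTokenizer.from_pretrained(tmpdirname),
#       image_processor=ViTImageProcessor.from_pretrained(tmpdirname),
#   )
#   batch = processor(text="lower newer", images=pil_image, return_tensors="pt")
#   batch.keys()  # input_ids, token_type_ids, attention_mask, pixel_values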
| 158
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for start of sentence (sos) token
        n_positions=32 * 32,  # [ 32x32 ] patches
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))

        return inputs
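# --- Illustrative sketch, not part of the original module ---
# The attribute_map above aliases the common config names onto the GPT-2-style
# fields, so generic code can stay model-agnostic:
#
#   config = ImageGPTConfig()
#   config.hidden_size          # -> config.n_embd (512)
#   config.num_hidden_layers    # -> config.n_layer (24)
#   config.num_attention_heads  # -> config.n_head (8)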
| 348
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NB: `worflow_run_id` (sic) matches the parameter name in get_ci_error_statistics
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and extract the contents of the requested artifacts of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
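# --- Illustrative usage sketch, not part of the original script ---
# Fetch and read the reports of the last scheduled run. The artifact name and
# the GITHUB_TOKEN environment variable are assumptions made for the example.
if __name__ == "__main__":
    token = os.environ.get("GITHUB_TOKEN")
    reports = get_last_daily_ci_reports(artifact_names=["ci_results"], output_dir="daily_ci", token=token)
    for artifact_name, files in reports.items():
        print(artifact_name, sorted(files))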
| 348
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
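# --- Illustrative sketch, not part of the original module ---
# attribute_map routes the generic config names onto the decoder fields:
#
#   config = Speech2Text2Config()
#   config.hidden_size          # -> config.d_model (256)
#   config.num_attention_heads  # -> config.decoder_attention_heads (4)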
| 94
|
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5])
    5.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
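# --- Illustrative note, not part of the original module ---
# Replacing the module in sys.modules with _LazyModule defers the heavy torch/TF
# imports: `from transformers.models.blip import BlipProcessor` only triggers the
# real submodule import when the attribute is first accessed.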
| 706
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
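# --- Illustrative sketch, not part of the original test file ---
# What the integration test above exercises: with apply_ocr=True (the default)
# the processor runs Tesseract and returns OCR words plus normalized (0-1000)
# boxes alongside pixel values; with apply_ocr=False only pixel values come back.
#
#   image_processing = LayoutLMv3ImageProcessor()
#   encoding = image_processing(image, return_tensors="pt")
#   encoding.words, encoding.boxes   # populated by OCR
#
#   image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
#   encoding = image_processing(image, return_tensors="pt")  # pixel_values only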
| 349
| 0
|