code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Union[str, Any] = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
__A : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 231
|
"""simple docstring"""
import math
import qiskit
def lowercase ( __snake_case : int = 1 , __snake_case : int = 1 , __snake_case : int = 1 ):
if (
isinstance(__snake_case , __snake_case )
or isinstance(__snake_case , __snake_case )
or isinstance(__snake_case , __snake_case )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(__snake_case ) != input_a)
or (math.floor(__snake_case ) != input_a)
or (math.floor(__snake_case ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
lowercase_ : List[Any] = qiskit.QuantumRegister(4 , '''qr''' )
lowercase_ : Dict = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
lowercase_ : Optional[Any] = [input_a, input_a, carry_in]
lowercase_ : List[str] = qiskit.QuantumCircuit(__snake_case , __snake_case )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(__snake_case ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(__snake_case ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(__snake_case ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , __snake_case ) # measure the last two qbits
lowercase_ : List[str] = qiskit.Aer.get_backend('''aer_simulator''' )
lowercase_ : Optional[int] = qiskit.execute(__snake_case , __snake_case , shots=1_0_0_0 )
return job.result().get_counts(__snake_case )
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 231
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Maps checkpoint identifier -> URL of its config.json on the Hub.
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/swinv2-tiny-patch4-window8-256': (
        'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
    ),
}
class Swinv2Config(PretrainedConfig):
    """Configuration class holding the hyper-parameters of a Swinv2 model."""

    model_type = '''swinv2'''
    # Translate the generic config attribute names to Swinv2-specific ones.
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],  # NOTE: shared mutable default; treated as read-only
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 6
|
'''simple docstring'''
from __future__ import annotations
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]):
UpperCamelCase__ : int = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.')
if len(UpperCAmelCase_) != 0:
UpperCamelCase__ : str = len(rows[0])
if cols == 0:
raise error
for row in rows:
if len(UpperCAmelCase_) != cols:
raise error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise error
UpperCamelCase__ : Optional[int] = rows
else:
UpperCamelCase__ : Optional[Any] = []
def __UpperCamelCase ( self : Union[str, Any]):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
def __UpperCamelCase ( self : Dict):
return len(self.rows)
@property
def __UpperCamelCase ( self : Tuple):
return len(self.rows[0])
@property
def __UpperCamelCase ( self : List[Any]):
return (self.num_rows, self.num_columns)
@property
def __UpperCamelCase ( self : Any):
return self.order[0] == self.order[1]
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[int] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
for row_num in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0])
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]))
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns))
def __UpperCamelCase ( self : str):
return bool(self.determinant())
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
UpperCamelCase__ : Optional[Any] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns)
if other_column != column
]
for other_row in range(self.num_rows)
if other_row != row
]
return Matrix(UpperCAmelCase_).determinant()
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
if (row + column) % 2 == 0:
return self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
return -1 * self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
return Matrix(
[
[self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) for column in range(self.num_columns)]
for row in range(self.num_rows)
])
def __UpperCamelCase ( self : Optional[int]):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns)
]
for row in range(self.minors().num_rows)
])
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = [
[self.cofactors().rows[column][row] for column in range(self.num_columns)]
for row in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse')
return self.adjugate() * (1 / determinant)
def __repr__( self : Any):
return str(self.rows)
def __str__( self : List[Any]):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(UpperCAmelCase_) for value in row]) + '.]'
for row in self.rows
])
+ "]"
)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : List[str] = TypeError('Row must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix')
if position is None:
self.rows.append(UpperCAmelCase_)
else:
UpperCamelCase__ : Tuple = self.rows[0:position] + [row] + self.rows[position:]
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : int = TypeError(
'Column must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in column:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix')
if position is None:
UpperCamelCase__ : Optional[int] = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
else:
UpperCamelCase__ : str = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows)
]
def __eq__( self : List[Any] , UpperCAmelCase_ : object):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any , UpperCAmelCase_ : object):
return not self == other
def __neg__( self : Union[str, Any]):
return self * -1
def __add__( self : Optional[int] , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __sub__( self : Tuple , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __mul__( self : Any , UpperCAmelCase_ : Matrix | int | float):
if isinstance(UpperCAmelCase_ , (int, float)):
return Matrix(
[[int(element * other) for element in row] for row in self.rows])
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second')
return Matrix(
[
[Matrix.dot_product(UpperCAmelCase_ , UpperCAmelCase_) for column in other.columns()]
for row in self.rows
])
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix')
def __pow__( self : Dict , UpperCAmelCase_ : int):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise TypeError('A Matrix can only be raised to the power of an int')
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power')
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power')
UpperCamelCase__ : str = self
for _ in range(other - 1):
result *= self
return result
@classmethod
def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int]):
return sum(row[i] * column[i] for i in range(len(UpperCAmelCase_)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6
| 1
|
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    """A second FileLock on the same path must time out while the first is held."""
    lock1 = FileLock(str(tmpdir / 'foo.lock'))
    lock2 = FileLock(str(tmpdir / 'foo.lock'))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        # The failed acquire must have waited at least the requested timeout.
        assert time.time() - _start > timeout
def test_long_path(tmpdir):
    """Over-long lock filenames are truncated to the OS basename limit (255)."""
    filename = 'a' * 1000 + '.lock'
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith('.lock')
    # The original (over-long) name cannot survive truncation intact.
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 589
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Maps checkpoint identifier -> URL of its config.json on the Hub.
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'huggingface/time-series-transformer-tourism-monthly': (
        'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    """Configuration for the Time Series Transformer encoder-decoder model."""

    model_type = '''time_series_transformer'''
    # Translate generic config attribute names to model-specific ones.
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],  # NOTE: shared default; treated as read-only
        scaling: Union[str, bool] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            # Default heuristic: half the cardinality, capped at 50 per feature.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra input features fed alongside the lagged values."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 406
| 0
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Filenames the tokenizer saves/loads; referenced by the tokenizer class below.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

# Maps checkpoint identifier -> hosted vocab/merges files for each Longformer model.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
        'allenai/longformer-large-4096': (
            'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
        ),
        'allenai/longformer-large-4096-finetuned-triviaqa': (
            'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
        ),
        'allenai/longformer-base-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
        ),
        'allenai/longformer-large-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
        ),
    },
    'merges_file': {
        'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
        'allenai/longformer-large-4096': (
            'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
        ),
        'allenai/longformer-large-4096-finetuned-triviaqa': (
            'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
        ),
        'allenai/longformer-base-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
        ),
        'allenai/longformer-large-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
        ),
    },
}

# Maximum input length (in positions) for each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/longformer-base-4096': 4_096,
    'allenai/longformer-large-4096': 4_096,
    'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
    'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
    'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte value (0-255) to a printable unicode character.

    Printable bytes map to themselves; the remaining bytes are shifted into
    the range starting at chr(256) so byte-level BPE never needs control
    characters or whitespace as vocabulary symbols.
    """
    bs = (
        list(range(ord('''!'''), ord('''~''') + 1)) + list(range(ord('''¡'''), ord('''¬''') + 1)) + list(range(ord('''®'''), ord('''ÿ''') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for Longformer (same scheme as GPT-2/RoBERTa)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to a single pre-tokenized token; memoized in self.cache."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ''' '''.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8'''))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(''' '''))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        # Join the unicode-mapped symbols, then decode back to real bytes.
        text = ''''''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')
        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Single sequence: <s> X </s>; pair: <s> A </s></s> B </s>
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # Longformer (like RoBERTa) uses all-zero token type ids.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('''add_prefix_space''', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
| 713
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ : int = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer Swin backbone."""

    model_type = "maskformer-swin"
    # Translate generic config attribute names to Swin-specific ones.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],  # NOTE: shared mutable default; treated as read-only
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['''stem'''] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 570
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase__( a_ , unittest.TestCase ):
    """Tokenization tests for RoCBertTokenizer.

    Exercises the full tokenizer (vocab + word-shape + word-pronunciation
    JSON files), the whitespace/accent/Chinese handling of
    RoCBertBasicTokenizer, the greedy RoCBertWordpieceTokenizer, and the
    character-class helpers (_is_whitespace / _is_control / _is_punctuation).

    NOTE(review): identifiers look machine-mangled — every class attribute
    is ``UpperCamelCase`` (later bindings shadow earlier ones), every method
    is ``__magic_name__`` and every local is ``__lowercase`` or
    ``__UpperCAmelCase`` (the latter is never defined).  Code is left
    byte-identical; comments record the apparent intent only.
    """

    UpperCamelCase : Union[str, Any] = RoCBertTokenizer
    UpperCamelCase : Optional[Any] = None
    UpperCamelCase : Any = False
    UpperCamelCase : Optional[int] = True
    UpperCamelCase : Optional[int] = filter_non_english

    def __magic_name__ ( self ):
        """Write a tiny vocab plus shape/pronunciation JSON maps into tmpdir."""
        super().setUp()
        __lowercase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
        __lowercase = {}
        __lowercase = {}
        for i, value in enumerate(__UpperCAmelCase ):
            # presumably maps each vocab token to its index in both the shape
            # and pronunciation dicts — TODO confirm against upstream source
            __lowercase = i
            __lowercase = i
        __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""] )
        __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.word_shape_file , """w""" , encoding="""utf-8""" ) as word_shape_writer:
            json.dump(__UpperCAmelCase , __UpperCAmelCase , ensure_ascii=__UpperCAmelCase )
        with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""" ) as word_pronunciation_writer:
            json.dump(__UpperCAmelCase , __UpperCAmelCase , ensure_ascii=__UpperCAmelCase )

    def __magic_name__ ( self ):
        """Full tokenizer round-trip: tokenize, then map to id/shape/pronunciation ids."""
        __lowercase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        __lowercase = tokenizer.tokenize("""你好[SEP]你是谁""" )
        self.assertListEqual(__UpperCAmelCase , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )

    def __magic_name__ ( self ):
        """Basic tokenizer splits CJK characters into individual tokens."""
        __lowercase = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )

    def __magic_name__ ( self ):
        """Lower-casing basic tokenizer folds case and strips accents by default."""
        __lowercase = RoCBertBasicTokenizer(do_lower_case=__UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )

    def __magic_name__ ( self ):
        """Lower-casing with strip_accents disabled keeps the accented character."""
        __lowercase = RoCBertBasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )

    def __magic_name__ ( self ):
        """Lower-casing with strip_accents enabled removes accents entirely."""
        __lowercase = RoCBertBasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )

    def __magic_name__ ( self ):
        """Default lower-casing behavior strips accents."""
        __lowercase = RoCBertBasicTokenizer(do_lower_case=__UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )

    def __magic_name__ ( self ):
        """Case-preserving basic tokenizer keeps original casing."""
        __lowercase = RoCBertBasicTokenizer(do_lower_case=__UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def __magic_name__ ( self ):
        """Case-preserving with strip_accents disabled keeps accents and case."""
        __lowercase = RoCBertBasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def __magic_name__ ( self ):
        """Case-preserving with strip_accents enabled removes accents only."""
        __lowercase = RoCBertBasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def __magic_name__ ( self ):
        """Tokens listed in never_split must pass through unsplit."""
        __lowercase = RoCBertBasicTokenizer(do_lower_case=__UpperCAmelCase , never_split=["""[UNK]"""] )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )

    def __magic_name__ ( self ):
        """Greedy wordpiece tokenization with ## continuation and [UNK] fallback."""
        __lowercase = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        __lowercase = {}
        for i, token in enumerate(__UpperCAmelCase ):
            __lowercase = i
        __lowercase = RoCBertWordpieceTokenizer(vocab=__UpperCAmelCase , unk_token="""[UNK]""" )
        self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
        self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )

    def __magic_name__ ( self ):
        """_is_whitespace recognizes ASCII whitespace and NBSP, not letters."""
        self.assertTrue(_is_whitespace(""" """ ) )
        self.assertTrue(_is_whitespace("""\t""" ) )
        self.assertTrue(_is_whitespace("""\r""" ) )
        self.assertTrue(_is_whitespace("""\n""" ) )
        self.assertTrue(_is_whitespace("""\u00A0""" ) )
        self.assertFalse(_is_whitespace("""A""" ) )
        self.assertFalse(_is_whitespace("""-""" ) )

    def __magic_name__ ( self ):
        """_is_control flags control chars but not printable/whitespace chars."""
        self.assertTrue(_is_control("""\u0005""" ) )
        self.assertFalse(_is_control("""A""" ) )
        self.assertFalse(_is_control(""" """ ) )
        self.assertFalse(_is_control("""\t""" ) )
        self.assertFalse(_is_control("""\r""" ) )

    def __magic_name__ ( self ):
        """_is_punctuation covers ASCII punctuation, not letters/spaces."""
        self.assertTrue(_is_punctuation("""-""" ) )
        self.assertTrue(_is_punctuation("""$""" ) )
        self.assertTrue(_is_punctuation("""`""" ) )
        self.assertTrue(_is_punctuation(""".""" ) )
        self.assertFalse(_is_punctuation("""A""" ) )
        self.assertFalse(_is_punctuation(""" """ ) )

    def __magic_name__ ( self ):
        """Soft-hyphen-only input must tokenize to nothing (clean_text)."""
        __lowercase = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(__UpperCAmelCase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
        if self.test_rust_tokenizer:
            __lowercase = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(__UpperCAmelCase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )

    def __magic_name__ ( self ):
        """Offset mapping must stay aligned when special characters are present."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __lowercase = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
                __lowercase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                __lowercase = tokenizer_r.encode_plus(
                    __UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , )
                __lowercase = tokenizer_r.do_lower_case if hasattr(__UpperCAmelCase , """do_lower_case""" ) else False
                # Expected (offset, token) pairs differ by casing mode.
                __lowercase = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """A"""),
                        ((1, 2), ""","""),
                        ((3, 5), """na"""),
                        ((5, 6), """##ï"""),
                        ((6, 8), """##ve"""),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), """Allen"""),
                        ((2_1, 2_3), """##NL"""),
                        ((2_3, 2_4), """##P"""),
                        ((2_5, 3_3), """sentence"""),
                        ((3_3, 3_4), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """a"""),
                        ((1, 2), ""","""),
                        ((3, 8), """naive"""),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), """allen"""),
                        ((2_1, 2_3), """##nl"""),
                        ((2_3, 2_4), """##p"""),
                        ((2_5, 3_3), """sentence"""),
                        ((3_3, 3_4), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                # NOTE(review): ``tokens`` / ``expected_results`` appear to be
                # the pre-mangling names of the locals above — verify upstream.
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )

    def __magic_name__ ( self ):
        """tokenize_chinese_chars toggles per-character splitting of CJK text."""
        __lowercase = ["""的""", """人""", """有"""]
        __lowercase = """""".join(__UpperCAmelCase )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __lowercase = True
                __lowercase = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
                __lowercase = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
                __lowercase = tokenizer_p.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                __lowercase = tokenizer_r.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                __lowercase = tokenizer_r.convert_ids_to_tokens(__UpperCAmelCase )
                __lowercase = tokenizer_p.convert_ids_to_tokens(__UpperCAmelCase )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
                self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
                __lowercase = False
                __lowercase = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
                __lowercase = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
                __lowercase = tokenizer_r.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                __lowercase = tokenizer_p.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                __lowercase = tokenizer_r.convert_ids_to_tokens(__UpperCAmelCase )
                __lowercase = tokenizer_p.convert_ids_to_tokens(__UpperCAmelCase )
                # it is expected that only the first Chinese character is not preceded by "##".
                __lowercase = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__UpperCAmelCase )
                ]
                self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
                self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )

    @slow
    def __magic_name__ ( self ):
        """build_inputs_with_special_tokens wraps with [CLS]=1 / [SEP]=2 ids."""
        __lowercase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        __lowercase = tokenizer.encode("""你好""" , add_special_tokens=__UpperCAmelCase )
        __lowercase = tokenizer.encode("""你是谁""" , add_special_tokens=__UpperCAmelCase )
        __lowercase = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
        __lowercase = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]

    def __magic_name__ ( self ):
        """prepare_for_model must agree with encode_plus on the same input."""
        __lowercase = self.get_tokenizers(do_lower_case=__UpperCAmelCase )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                __lowercase = """你好,你是谁"""
                __lowercase = tokenizer.tokenize(__UpperCAmelCase )
                __lowercase = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
                __lowercase = tokenizer.convert_tokens_to_shape_ids(__UpperCAmelCase )
                __lowercase = tokenizer.convert_tokens_to_pronunciation_ids(__UpperCAmelCase )
                __lowercase = tokenizer.prepare_for_model(
                    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                __lowercase = tokenizer.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
| 566
|
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
__UpperCamelCase : Optional[Any] = os.path.abspath(os.path.dirname(__file__))
# BUG FIX: the f-string below reads ``bindir``, which was never defined
# (the assignment above was renamed away from it).
bindir = __UpperCamelCase
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
    from run_translation import main  # noqa

set_seed(42)
# Small checkpoints used by the tests below (kept tiny for CI speed).
__UpperCamelCase : List[str] = """sshleifer/student_marian_en_ro_6_1"""
__UpperCamelCase : int = """sshleifer/tiny-mbart"""
@require_torch
class __SCREAMING_SNAKE_CASE( a_ ):
    """End-to-end tests for the seq2seq translation example (run_translation.py).

    NOTE(review): identifiers are machine-mangled — several signatures repeat
    the parameter name ``UpperCamelCase`` (a SyntaxError as written) and most
    locals are ``snake_case__``, so bodies reference names that are never
    bound.  Code is left byte-identical; docstrings and comments record the
    apparent intent only.
    """

    def lowerCAmelCase_ ( self: int , UpperCamelCase: Any=False , UpperCamelCase: Optional[Any]=None , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: int=True , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: List[Any]=True , ) -> Tuple:
        """Run one short train/eval cycle and sanity-check the logged metrics."""
        snake_case__ = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=UpperCamelCase , num_train_epochs=1 , distributed=UpperCamelCase , extra_args_str=UpperCamelCase , predict_with_generate=UpperCamelCase , do_train=UpperCamelCase , do_eval=UpperCamelCase , do_predict=UpperCamelCase , )
        snake_case__ = TrainerState.load_from_json(os.path.join(UpperCamelCase , 'trainer_state.json' ) ).log_history
        if not do_eval:
            return
        snake_case__ = [log for log in logs if 'eval_loss' in log.keys()]
        snake_case__ = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            snake_case__ = eval_metrics[-1]
            assert isinstance(last_step_stats['eval_bleu'] , UpperCamelCase )
            assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`"

    @require_torch_non_multi_gpu
    def lowerCAmelCase_ ( self: Optional[int] ) -> Dict:
        """Quick run on a single device."""
        self.run_seqaseq_quick()

    @require_torch_multi_gpu
    def lowerCAmelCase_ ( self: Any ) -> int:
        """Quick distributed run."""
        self.run_seqaseq_quick(distributed=UpperCamelCase )

    @require_torch_multi_gpu
    def lowerCAmelCase_ ( self: Tuple ) -> int:
        """Second quick distributed run variant."""
        self.run_seqaseq_quick(distributed=UpperCamelCase )

    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]:
        """Distributed run with fairscale sharded DDP (simple)."""
        self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str='--sharded_ddp simple' )

    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def lowerCAmelCase_ ( self: Any ) -> Any:
        """Sharded DDP (simple) with fp16."""
        self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str='--sharded_ddp simple --fp16' )

    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def lowerCAmelCase_ ( self: int ) -> Tuple:
        """Sharded DDP ZeRO-2; generation disabled because it is unsupported there."""
        self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str='--sharded_ddp zero_dp_2' , predict_with_generate=UpperCamelCase )

    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def lowerCAmelCase_ ( self: Dict ) -> Tuple:
        """Sharded DDP ZeRO-2 with fp16."""
        self.run_seqaseq_quick(
            distributed=UpperCamelCase , extra_args_str='--sharded_ddp zero_dp_2 --fp16' , predict_with_generate=UpperCamelCase )

    @require_apex
    @require_torch_gpu
    def lowerCAmelCase_ ( self: Tuple ) -> Any:
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str='--fp16 --fp16_backend=apex' )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str='--fp16 --fp16_backend=apex' )

    @parameterized.expand(['base', 'low', 'high', 'mixed'] )
    @require_torch_multi_gpu
    def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: List[str] ) -> str:
        """Check --log_level / --log_level_replica produce the expected verbosity."""
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        snake_case__ = {
            # test with the default log_level - should be info and thus log info once
            'base': {'extra_args_str': '', 'n_matches': 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
        }
        snake_case__ = experiments[experiment_id]
        snake_case__ = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
        snake_case__ = 'Running training'
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**UpperCamelCase , extra_args_str=data['extra_args_str'] )
        snake_case__ = len(re.findall(UpperCamelCase , cl.err ) )
        self.assertEqual(UpperCamelCase , data['n_matches'] )

    @slow
    def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any:
        """Full training run: loss must improve and do_predict must write outputs."""
        snake_case__ = self.run_trainer(
            eval_steps=2 , max_len=1_28 , model_name=UpperCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=UpperCamelCase , )
        # Check metrics
        snake_case__ = TrainerState.load_from_json(os.path.join(UpperCamelCase , 'trainer_state.json' ) ).log_history
        snake_case__ = [log for log in logs if 'eval_loss' in log.keys()]
        snake_case__ = eval_metrics[0]
        snake_case__ = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['eval_bleu'] , UpperCamelCase )
        # test if do_predict saves generations and metrics
        snake_case__ = os.listdir(UpperCamelCase )
        snake_case__ = {os.path.basename(UpperCamelCase ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents

    @slow
    @require_bitsandbytes
    def lowerCAmelCase_ ( self: int ) -> int:
        """Compare GPU memory with AdamW vs 8-bit BNB Adam; loss must match."""
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(UpperCamelCase: str ) -> Tuple[int, float]:
            # Train once with the given optimizer and report peak/alloc MB + loss.
            snake_case__ = '--skip_memory_metrics 0'
            snake_case__ = self.run_trainer(
                max_len=1_28 , model_name=UpperCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=UpperCamelCase , distributed=UpperCamelCase , extra_args_str=UpperCamelCase , do_eval=UpperCamelCase , do_predict=UpperCamelCase , n_gpus_to_use=1 , )
            # Check metrics
            snake_case__ = TrainerState.load_from_json(Path(UpperCamelCase , 'trainer_state.json' ) ).log_history
            snake_case__ = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 )
            snake_case__ = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 )
            snake_case__ = logs[0]['train_loss']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        snake_case__ , snake_case__ , snake_case__ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        snake_case__ , snake_case__ , snake_case__ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        snake_case__ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        snake_case__ = gpu_peak_mem_orig + gpu_alloc_mem_orig
        snake_case__ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        snake_case__ = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb 25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        snake_case__ = 1_20
        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            UpperCamelCase , UpperCamelCase , 'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
            F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
            F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
        self.assertGreater(
            UpperCamelCase , UpperCamelCase , 'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
            F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
            F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
        self.assertEqual(
            UpperCamelCase , UpperCamelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )

    def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: int , UpperCamelCase: str , UpperCamelCase: int , UpperCamelCase: float = 3e-3 , UpperCamelCase: str = "adafactor" , UpperCamelCase: bool = False , UpperCamelCase: str = None , UpperCamelCase: int = 0 , UpperCamelCase: bool = True , UpperCamelCase: bool = True , UpperCamelCase: bool = True , UpperCamelCase: bool = True , UpperCamelCase: int = None , ) -> Union[str, Any]:
        """Assemble CLI args and run run_translation.main, optionally via torch.distributed.

        Returns the output directory containing trainer_state.json and predictions.
        """
        snake_case__ = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
        snake_case__ = self.get_auto_remove_tmp_dir()
        snake_case__ = F'''
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length 72,090
            --max_target_length 72,090
            --do_train
            --num_train_epochs {str(UpperCamelCase )}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(UpperCamelCase )}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        '''.split()
        snake_case__ = F'''
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length 72,090
            --evaluation_strategy steps
            --eval_steps {str(UpperCamelCase )}
        '''.split()
        snake_case__ = '\n --do_predict\n '.split()
        snake_case__ = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += F'''--optim {optim}'''.split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                snake_case__ = get_gpu_count()
            snake_case__ = get_torch_dist_unique_port()
            snake_case__ = F'''
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            '''.split()
            snake_case__ = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(UpperCamelCase , env=self.get_env() )
        else:
            snake_case__ = ['run_translation.py'] + args
            with patch.object(UpperCamelCase , 'argv' , UpperCamelCase ):
                main()
        return output_dir
| 328
| 0
|
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
# Record type for the three scraped counters.
# (BUG FIX: the previous module-level annotation referenced the unimported
# name ``Optional[Any]`` and the names ``covid_data`` / ``fmt`` /
# ``covid_stats`` used below were never defined.)
__SCREAMING_SNAKE_CASE = namedtuple('covid_data', 'cases deaths recovered')
covid_data = __SCREAMING_SNAKE_CASE  # readable alias used by the scraper


def _a(_SCREAMING_SNAKE_CASE="https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape worldometers and return (cases, deaths, recovered) text values.

    Performs network I/O via ``requests`` and parses the page with ``lxml``.
    """
    snake_case_ = """//div[@class = \"maincounter-number\"]/span/text()"""
    return covid_data(*html.fromstring(requests.get(_SCREAMING_SNAKE_CASE).content).xpath(snake_case_))


fmt = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*_a()))
| 2
|
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
if index == number_of_items:
return 0
snake_case_ = 0
snake_case_ = 0
snake_case_ = knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 )
if weights[index] <= max_weight:
snake_case_ = values[index] + knapsack(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 )
return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    """Fast pipeline tests for StableDiffusionSAGPipeline built from tiny components.

    NOTE(review): class attributes are all named ``_UpperCamelCase`` (later
    bindings shadow earlier ones) — names look machine-mangled; code is
    left byte-identical.
    """

    _UpperCamelCase : Tuple = StableDiffusionSAGPipeline
    _UpperCamelCase : Tuple = TEXT_TO_IMAGE_PARAMS
    _UpperCamelCase : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : Any = False

    def _snake_case ( self ) -> str:
        """Build tiny deterministic UNet/scheduler/VAE/CLIP components for fast tests."""
        torch.manual_seed(0 )
        a__ : Tuple = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        a__ : List[Any] = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case , set_alpha_to_one=snake_case , )
        torch.manual_seed(0 )
        a__ : Optional[int] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        a__ : Any = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        a__ : Dict = CLIPTextModel(snake_case )
        a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        a__ : Tuple = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def _snake_case ( self , snake_case , snake_case=0 ) -> str:
        """Return deterministic pipeline call kwargs seeded per device."""
        if str(snake_case ).startswith("mps" ):
            a__ : Tuple = torch.manual_seed(snake_case )
        else:
            a__ : List[Any] = torch.Generator(device=snake_case ).manual_seed(snake_case )
        a__ : Optional[Any] = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def _snake_case ( self ) -> str:
        """Batched vs single inference must agree within a small tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for StableDiffusionSAGPipeline on real checkpoints.

    NOTE(review): locals are machine-mangled (``a__``/``snake_case``); code
    is left byte-identical and comments describe apparent intent only.
    """

    def _snake_case ( self ) -> Optional[Any]:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self ) -> List[str]:
        """SD v1.4: image slice must match the recorded reference values."""
        a__ : Union[str, Any] = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
        a__ : Dict = sag_pipe.to(snake_case )
        sag_pipe.set_progress_bar_config(disable=snake_case )
        a__ : Dict = "."
        a__ : List[Any] = torch.manual_seed(0 )
        a__ : Optional[Any] = sag_pipe(
            [prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
        a__ : List[Any] = output.images
        a__ : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        a__ : Any = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2

    def _snake_case ( self ) -> Optional[int]:
        """SD v2.1-base: image slice must match the recorded reference values."""
        a__ : Optional[int] = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        a__ : Tuple = sag_pipe.to(snake_case )
        sag_pipe.set_progress_bar_config(disable=snake_case )
        a__ : List[str] = "."
        a__ : Tuple = torch.manual_seed(0 )
        a__ : int = sag_pipe(
            [prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
        a__ : List[str] = output.images
        a__ : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        a__ : List[str] = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2

    def _snake_case ( self ) -> List[Any]:
        """Non-square (768x512) generation must yield the expected output shape."""
        a__ : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        a__ : List[Any] = sag_pipe.to(snake_case )
        sag_pipe.set_progress_bar_config(disable=snake_case )
        a__ : str = "."
        a__ : Optional[Any] = torch.manual_seed(0 )
        a__ : List[str] = sag_pipe(
            [prompt] , width=768 , height=512 , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , )
        a__ : List[str] = output.images
        assert image.shape == (1, 512, 768, 3)
| 112
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = TransfoXLTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowerCamelCase__ ( self :Optional[Any] ):
    """Write a small word-level vocab file into the test tmpdir.

    NOTE(review): locals look machine-mangled — the path is assigned to
    ``a`` but read back as ``self.vocab_file``, and the token list is read
    back as ``vocab_tokens``; verify against the upstream test.
    """
    super().setUp()
    a = [
        """<unk>""",
        """[CLS]""",
        """[SEP]""",
        """want""",
        """unwanted""",
        """wa""",
        """un""",
        """running""",
        """,""",
        """low""",
        """l""",
    ]
    a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
    with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
        vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCamelCase__ ( self :List[Any] , **__magic_name__ :Optional[int] ):
    """Build a TransfoXLTokenizer from the temp vocab dir, defaulting lower_case=True.

    BUG FIX: the previous body assigned ``True`` to a dead local; the intent
    (matching the reference TransfoXL test suite) is to force the
    ``lower_case`` kwarg before loading the tokenizer.
    """
    __magic_name__["""lower_case"""] = True
    return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :int ):
    """Return an (input, expected-output) text pair for round-trip tests.

    The second argument is accepted for interface compatibility and unused.
    """
    return """<unk> UNwanted , running""", """<unk> unwanted, running"""
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=__magic_name__ )
a = tokenizer.tokenize("""<unk> UNwanted , running""" )
self.assertListEqual(__magic_name__ , ["""<unk>""", """unwanted""", """,""", """running"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [0, 4, 8, 7] )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = TransfoXLTokenizer(lower_case=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = TransfoXLTokenizer(lower_case=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = TransfoXLTokenizer(lower_case=__magic_name__ )
a = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
a = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
self.assertListEqual(tokenizer.tokenize(__magic_name__ ) , __magic_name__ )
self.assertEqual(tokenizer.convert_tokens_to_string(__magic_name__ ) , __magic_name__ )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.get_tokenizer()
a = len(__magic_name__ )
tokenizer.add_tokens(["""new1""", """new2"""] )
tokenizer.move_added_token("""new1""" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(__magic_name__ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , """new1""" )
| 468
| 0
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
SCREAMING_SNAKE_CASE : List[Any] = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class UpperCamelCase :
'''simple docstring'''
lowercase : str
lowercase : Optional[str] =None
lowercase : Optional[Union[str, int]] =None
lowercase : Optional[Union[str, int]] =None
lowercase : Optional[Union[str, int]] =None
def UpperCamelCase ( self ):
lowercase_ , lowercase_ , lowercase_ :Optional[Any] = _str_to_version_tuple(self.version_str )
def __repr__( self ):
return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def UpperCamelCase ( self ):
return self.major, self.minor, self.patch
def UpperCamelCase ( self , UpperCamelCase_ ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return Version(UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return other
raise TypeError(f"{other} (type {type(UpperCamelCase_ )}) cannot be compared to version." )
def __eq__( self , UpperCamelCase_ ):
try:
lowercase_ :List[Any] = self._validate_operand(UpperCamelCase_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , UpperCamelCase_ ):
lowercase_ :str = self._validate_operand(UpperCamelCase_ )
return self.tuple < other.tuple
def __hash__( self ):
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def UpperCamelCase ( cls , UpperCamelCase_ ):
lowercase_ :Union[str, Any] = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def UpperCamelCase ( self ):
return self.version_str
def UpperCamelCase ( _a ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ :List[str] = _VERSION_REG.match(_a )
if not res:
raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." )
return tuple(int(_a ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def UpperCamelCase ( _a ) -> Tuple:
'''simple docstring'''
return ".".join(str(_a ) for v in version_tuple )
| 441
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
# Module-level flag; not referenced anywhere in this chunk — presumably a
# debug/feature toggle from the original file, TODO confirm.
SCREAMING_SNAKE_CASE : int = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    """Fast CPU tests for VQDiffusionPipeline built from tiny dummy components."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        # Size of the VQ codebook; also the scheduler's vocabulary size.
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        # NOTE(review): `TransformeraDModel` matches the (obfuscated) import at
        # the top of this file; upstream diffusers calls this Transformer2DModel.
        model = TransformeraDModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        # Re-seed so the tuple-return path produces the identical sample.
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        # Learnable embeddings sized to the text encoder's sequence/hidden dims.
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        # NOTE(review): tolerance of 2.0 preserved from the original source.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the released microsoft/vq-diffusion-ithq weights."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy")

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool", num_images_per_prompt=1, generator=generator, output_type="np", )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 441
| 1
|
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


# UnCLIP needs both torch and transformers>=4.25.0. When either is missing,
# export dummy placeholder objects (which raise a helpful error on use) so
# that `from ... import UnCLIPPipeline` still succeeds at import time.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 412
|
def UpperCAmelCase_(UpperCAmelCase__=1_000_000):
    """Return sum of Euler's totient phi(n) for 2 <= n < limit+1 (Project Euler 72).

    :param UpperCAmelCase__: upper bound (exclusive for the prime sieve).
    :return: int(sum(phi[2:])) — the number of reduced proper fractions.
    """
    limit = UpperCAmelCase__
    # Sieve of Eratosthenes over the odd numbers (2 added explicitly).
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        # Strike out multiples of p from p*p with step p (the original passed
        # the limit as the step, which removed almost nothing).
        primes.difference_update(set(range(p * p, limit, p)))

    # phi[n] starts at n and is multiplied by (1 - 1/p) for each prime divisor p.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    # The original printed an undefined name `solution`; call the function above.
    print(f"{UpperCAmelCase_() = }")
| 412
| 1
|
from __future__ import annotations
from collections import Counter
from random import random
class A:
    """Directed graph whose edge weights are transition probabilities (a Markov chain)."""

    def __init__(self) -> None:
        # node -> {destination node -> transition probability}
        self.connections = {}

    def add_node(self, node: str) -> None:
        """Register *node* with no outgoing transitions yet."""
        self.connections[node] = {}

    def add_transition_probability(self, node_a: str, node_b: str, probability: float) -> None:
        """Add (or overwrite) the transition node_a -> node_b with the given probability."""
        if node_a not in self.connections:
            self.add_node(node_a)
        if node_b not in self.connections:
            self.add_node(node_b)
        self.connections[node_a][node_b] = probability

    def get_nodes(self) -> list:
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Sample the next node from *node*'s outgoing distribution.

        Returns "" if the outgoing probabilities sum to less than the drawn value.
        """
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def a_(start, transitions, steps):
    """Run a random walk of *steps* transitions from *start*; return visit counts.

    *transitions* is an iterable of (node_a, node_b, probability) triples.
    """
    graph = A()
    for node_a, node_b, probability in transitions:
        graph.add_transition_probability(node_a, node_b, probability)

    # Counter(get_nodes()) seeds every node with an initial count of 1.
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 700
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__a : Union[str, Any] = TypeVar("""KEY""")
__a : Union[str, Any] = TypeVar("""VAL""")
@dataclass(frozen=lowerCamelCase_ , slots=lowerCamelCase_ )
class A ( Generic[KEY, VAL] ):
_SCREAMING_SNAKE_CASE : KEY
_SCREAMING_SNAKE_CASE : VAL
class A ( _Item ):
def __init__( self : Union[str, Any] ) -> None:
"""simple docstring"""
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __bool__( self : Optional[int] ) -> bool:
"""simple docstring"""
return False
__a : List[Any] = _DeletedItem()
class A ( MutableMapping[KEY, VAL] ):
def __init__( self : int , __UpperCAmelCase : int = 8 , __UpperCAmelCase : float = 0.75 ) -> None:
"""simple docstring"""
UpperCamelCase_ = initial_block_size
UpperCamelCase_ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
UpperCamelCase_ = capacity_factor
UpperCamelCase_ = 0
def lowercase__ ( self : Optional[Any] , __UpperCAmelCase : KEY ) -> int:
"""simple docstring"""
return hash(__UpperCAmelCase ) % len(self._buckets )
def lowercase__ ( self : Any , __UpperCAmelCase : int ) -> int:
"""simple docstring"""
return (ind + 1) % len(self._buckets )
def lowercase__ ( self : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : KEY , __UpperCAmelCase : VAL ) -> bool:
"""simple docstring"""
UpperCamelCase_ = self._buckets[ind]
if not stored:
UpperCamelCase_ = _Item(__UpperCAmelCase , __UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
UpperCamelCase_ = _Item(__UpperCAmelCase , __UpperCAmelCase )
return True
else:
return False
def lowercase__ ( self : List[Any] ) -> bool:
"""simple docstring"""
UpperCamelCase_ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__UpperCAmelCase )
def lowercase__ ( self : Dict ) -> bool:
"""simple docstring"""
if len(self._buckets ) <= self._initial_block_size:
return False
UpperCamelCase_ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def lowercase__ ( self : List[Any] , __UpperCAmelCase : int ) -> None:
"""simple docstring"""
UpperCamelCase_ = self._buckets
UpperCamelCase_ = [None] * new_size
UpperCamelCase_ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def lowercase__ ( self : Union[str, Any] ) -> None:
"""simple docstring"""
self._resize(len(self._buckets ) * 2 )
def lowercase__ ( self : str ) -> None:
"""simple docstring"""
self._resize(len(self._buckets ) // 2 )
def lowercase__ ( self : Any , __UpperCAmelCase : KEY ) -> Iterator[int]:
"""simple docstring"""
UpperCamelCase_ = self._get_bucket_index(__UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
UpperCamelCase_ = self._get_next_ind(__UpperCAmelCase )
def lowercase__ ( self : Optional[int] , __UpperCAmelCase : KEY , __UpperCAmelCase : VAL ) -> None:
"""simple docstring"""
for ind in self._iterate_buckets(__UpperCAmelCase ):
if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
break
def __setitem__( self : Any , __UpperCAmelCase : KEY , __UpperCAmelCase : VAL ) -> None:
"""simple docstring"""
if self._is_full():
self._size_up()
self._add_item(__UpperCAmelCase , __UpperCAmelCase )
def __delitem__( self : str , __UpperCAmelCase : KEY ) -> None:
"""simple docstring"""
for ind in self._iterate_buckets(__UpperCAmelCase ):
UpperCamelCase_ = self._buckets[ind]
if item is None:
raise KeyError(__UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
UpperCamelCase_ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Tuple , __UpperCAmelCase : KEY ) -> VAL:
"""simple docstring"""
for ind in self._iterate_buckets(__UpperCAmelCase ):
UpperCamelCase_ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__UpperCAmelCase )
def __len__( self : Union[str, Any] ) -> int:
"""simple docstring"""
return self._len
def __iter__( self : Optional[int] ) -> Iterator[KEY]:
"""simple docstring"""
yield from (item.key for item in self._buckets if item)
def __repr__( self : Dict ) -> str:
"""simple docstring"""
UpperCamelCase_ = ' ,'.join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
| 559
| 0
|
def print_max_activities(start, finish) -> None:
    """Greedy activity selection: print indices of a maximum set of compatible activities.

    Assumes the activities are already sorted by finish time. Indices are
    printed comma-separated (trailing comma preserved from the original).
    """
    n = len(finish)
    print('The following activities are selected:')

    # The first activity is always selected
    i = 0
    print(i, end=',')

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
| 89
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class ImageGPTConfig(PretrainedConfig):
    """Configuration for ImageGPT models (GPT-2-style decoder over pixel-cluster tokens)."""

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic config attribute names onto the GPT-2-style ones below.
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 pixel clusters + 1 start-of-sequence token
        n_positions=32 * 32,  # one position per pixel of a 32x32 image
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,  # None -> the model uses its default (4 * n_embd) inner size
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    """ONNX export configuration for ImageGPT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # input_ids with dynamic batch (axis 0) and sequence (axis 1) dimensions.
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Build dummy inputs by generating random images and running them through *preprocessor*."""
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 467
| 0
|
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity |A∩B| / |A∪B| of two collections.

    When *alternative_union* is true, the denominator is |A| + |B| instead of
    the true union size. Accepts sets, or lists/tuples (order-preserving path);
    returns None for unsupported input types.

    NOTE(review): for tuple inputs the list branch concatenates ``set_a +
    [...]`` which raises TypeError — behavior preserved from the original.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # Union as a de-duplicating concatenation that preserves order.
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
| 342
|
"""simple docstring"""
from timeit import timeit
# Palindrome fixtures: string -> whether it is a palindrome. The name must be
# `test_data` — the assert below and benchmark_function's timeit setup use it.
test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    """Two-pointer palindrome check: compare characters from both ends inward."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    """Palindrome check by traversing only the first half of the string."""
    end = len(s) // 2
    n = len(s)

    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
    """Recursive palindrome check: strip matching outer characters."""
    if len(s) <= 2:
        # Length 0/1 is trivially a palindrome; length 2 falls through below.
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    """Palindrome check via slice reversal."""
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    """Time the module-level function *name* against every test_data entry.

    The timeit setup imports from __main__, so this only works when the module
    is run as a script (as the __main__ guard below does).
    """
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
    # Sanity-check that all implementations agree on the fixtures,
    # then benchmark each one (timings below are from a reference run).
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(F'''{key:21} {value}''')
    print("""a man a plan a canal panama""")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("""is_palindrome_slice""")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("""is_palindrome""")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("""is_palindrome_recursive""")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("""is_palindrome_traversal""")
| 342
| 1
|
import math
import sys
def lowerCAmelCase_(__a) -> int:
    """Return the minimum number of perfect squares summing to the given number.

    Dynamic programming over 1..number (Lagrange's four-square theorem bounds
    the answer at 4). Raises ValueError for non-integral or negative input.
    NOTE(review): returns 1 for input 0 — behavior preserved from the original.
    """
    number = __a
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1

    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            # Take one square j**2 plus the best decomposition of the remainder.
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 59
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    """Compute the (height, width) to resize *input_image* to.

    Each output dimension is constrained to a multiple of *multiple*; when
    *keep_aspect_ratio* is true the smaller relative scale is applied to both
    dimensions so the image is scaled as little as possible.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple; correct with floor if that exceeds
        # max_val, or with ceil if it falls below min_val.
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    r"""
    Image processor for DPT: optional resize (aspect-preserving, constrained to
    a multiple of `ensure_multiple_of`), rescale, and normalization.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize *image* to *size*, constrained to multiples of *ensure_multiple_of*."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, )
        # Calls the module-level `resize` from image_transforms, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize *image* by the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess an image or batch of images; each argument defaults to the instance setting."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        # NOTE(review): precedence is (do_resize and size is None) or
        # (resample is None) — preserved from the original.
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert raw model outputs into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="""bilinear""", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 662
| 0
|
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__SCREAMING_SNAKE_CASE = '\\n Text data.\n Second line of data.'
__SCREAMING_SNAKE_CASE = 'file'
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    """Session-scoped fixture: write FILE_CONTENT zstd-compressed and return the path."""
    # The obfuscated body referenced undefined `_lowercase` and an injected
    # `tmp_path_factory` that was not in the signature; restored here.
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    """Write FILE_CONTENT into the mock `tmp://` filesystem and return its relative path."""
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    """cached_path with extract_compressed_file=True must yield the original text."""
    # Restored fixture parameter names and the undefined `_lowercase` references;
    # renamed from `__a` so pytest actually collects the test.
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    """The extraction directory must honour the configured cache/extracted dirs."""
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    """A local path (absolute or relative) passes through cached_path unchanged."""
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    """A missing local file must raise FileNotFoundError (absolute and relative)."""
    # The obfuscated version raised `pytest.raises(_lowercase)` with an
    # undefined name; FileNotFoundError matches the upstream datasets test.
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    """get_from_cache over the mock `tmp://` filesystem returns the file content."""
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    """With offline mode forced on, remote cached_path must raise OfflineModeIsEnabled."""
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    """HTTP helpers must raise OfflineModeIsEnabled when offline mode is on."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    """FTP helpers must raise OfflineModeIsEnabled when offline mode is on."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    """fsspec helpers must raise OfflineModeIsEnabled when offline mode is on."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 718
|
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the product term of Newton's
    forward-difference interpolation formula.

    The obfuscated original declared two parameters with the same name (a
    SyntaxError) and used undefined locals; `main` below calls `ucal`, which
    fixes both the name and the body.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    """Interactively read sample points and a target value, then print the
    Newton forward-difference interpolation estimate at that value.

    Renamed from the obfuscated `__a` so the `__main__` guard's `main()` call
    resolves; all undefined locals restored from the algorithm's structure.
    """
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    # pre-fill the forward-difference table with zeros
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
| 340
| 0
|
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of non-negative `num`."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Project Euler 65: digit sum of the numerator of the `max_n`-th
    convergent of the continued fraction of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...].

    The recurrence h_i = a_i * h_{i-1} + h_{i-2} is applied with the
    e-specific partial quotients (a_i = 2*i/3 when i % 3 == 0, else 1).
    """
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
| 221
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """Step `scheduler` `num_steps` times and return the learning rate seen
    before each step.

    Renamed from the obfuscated helper (which had two parameters both named
    `A_`, a SyntaxError); the test class below calls `unwrap_schedule`.
    """
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """Like `unwrap_schedule`, but halfway through, round-trip the scheduler
    through `state_dict` save/load to check that a reload does not change the
    produced learning-rate sequence.
    """
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    """Sanity-checks the transformers optimizers: a handful of steps on a
    3-element tensor must converge onto the regression target.

    NOTE(review): the obfuscated original referenced undefined names
    (`__lowerCAmelCase`) inside the method bodies and collided with the other
    test class's name; values restored to match upstream transformers.
    """

    def assertListAlmostEqual(self, list1, list2, tol):
        # Element-wise almost-equality with a shared absolute tolerance.
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,  # matches upstream; the obfuscated code passed an undefined name here
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1_000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    """Checks every learning-rate schedule factory against hard-coded expected
    LR curves, and that each schedule survives a state_dict save/reload."""

    # Class-level fixtures; guarded because torch may be absent.
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        # Element-wise almost-equality with a shared absolute tolerance.
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )
            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Pickleable callable wrapper for a schedule lambda.

    Named per the call site in the test above (`LambdaScheduleWrapper.wrap_scheduler`).
    Bug fixed in `wrap_scheduler`: the obfuscated code built the wrapped
    lambdas but discarded them instead of assigning back to
    `scheduler.lr_lambdas`.
    """

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        # Replace each raw lambda with a wrapper instance so the scheduler
        # state can be pickled/saved.
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 221
| 1
|
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__a = logging.get_logger(__name__)
class Conversation:
    """Holds the state of one chat: unprocessed user input, past user inputs
    and generated responses, keyed by a UUID.

    Named per the annotations/usage in the pipeline below
    (`Union[Conversation, List[Conversation]]`, `mark_processed`,
    `append_response`, `iter_texts`, `add_user_input`). The obfuscated
    original declared four `__init__` parameters with the same name
    (a SyntaxError) and called a nonexistent `uuid.uuida()`.
    """

    def __init__(self, text=None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id          # unique id of this conversation
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text           # pending, not-yet-processed input

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        # Same UUID means same conversation regardless of content.
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text, overwrite=False):
        """Queue `text` as the next user input; warn (and optionally overwrite)
        if a pending input already exists."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
                    f"with: \"{text}\".")
                self.new_user_input = text
            else:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
                    f"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input")
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending user input (if any) into the history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response):
        """Record a model-generated response."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield (is_user, text) pairs in chronological order, ending with any
        pending user input."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ",
)
class ConversationalPipeline(Pipeline):
    """Multi-turn conversational pipeline.

    NOTE(review): the obfuscated original used the undefined name `_a` for the
    decorator argument and base class, and gave every method the same name so
    later defs shadowed earlier ones. Names restored to the standard Pipeline
    hook names (`_sanitize_parameters`/`preprocess`/`_forward`/`postprocess`);
    `_legacy_parse_and_tokenize` is grounded by its own call site.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Generation requires a pad token; fall back to EOS when none is set.
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        """Split call-time kwargs into preprocess / forward / postprocess params."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        """Run the pipeline; unwrap single-element result lists."""
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        """Tokenize the conversation into framework-specific input ids."""
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        """Generate a response, trimming history that would leave fewer than
        `minimum_tokens` tokens for the answer."""
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        # Encoder-decoder outputs start with a decoder-start token; causal-LM
        # outputs echo the whole prompt, so skip the first n tokens instead.
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        """Decode the generated ids and append the answer to the conversation."""
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        """Fallback tokenization: join turns with EOS, truncate from the left."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 301
|
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def get_imdb_top_aaa_movies(url: str = "") -> dict[str, float]:
    """Scrape the IMDb Top 250 chart and return {title: rating}.

    Named per the call in `write_movies`; the obfuscated def shadowed its
    sibling and its body read an undefined `url`.
    """
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }
def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    """Fetch the IMDb Top 250 and write it to `filename` as a two-column CSV.

    Renamed from the obfuscated `__snake_case` so the `__main__` guard's
    `write_movies()` call resolves.
    """
    movies = get_imdb_top_aaa_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
| 301
| 1
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for IFInpaintingSuperResolutionPipeline.

    NOTE(review): the obfuscated original listed the same undefined base class
    twice and gave every class attribute/method one shared name; restored to
    the PipelineTesterMixin contract (pipeline_class/params/batch_params are
    the conventional mixin hook names — confirm against the mixin).
    """

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-specific generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 512
|
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 512
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazily-initialised public interface for the LiLT model (standard
# transformers __init__ pattern). Bugs fixed: the import-structure dict was
# assigned to a throwaway name and then clobbered by the modelling list, and
# the _LazyModule result was never installed into sys.modules.
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

# Register the torch-only modelling objects when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy loader so heavy imports only
    # happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 112
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """Entry point of the `accelerate` CLI: build the parser, register all
    sub-commands, parse argv and dispatch to the chosen command's handler.

    Renamed from the obfuscated `__lowercase` so the `__main__` guard's
    `main()` call resolves; the undefined `lowerCamelCase_` references are
    restored to their intended values.
    """
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        # No sub-command given: show usage and exit with an error status.
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 112
| 1
|
from ..utils import DummyObject, requires_backends
# Auto-generated dummy objects: importable placeholders that raise a helpful
# error (via `requires_backends`) when torch/transformers/onnx are missing.
# Fix: `metaclass=_A` referenced an undefined name; `DummyObject` is imported
# above and is the metaclass this template uses.
# NOTE(review): the original distinct class names were destroyed by the
# obfuscation (all six collapsed to one identifier, so only the last
# definition survives); they are not recoverable from this chunk.
class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 2
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Download the LAVIS 'merlion' demo image and return it as an RGB PIL image.

    Bug fix: the mangled version assigned to a throwaway name and returned the
    undefined `image`; it is also called as `load_demo_image` elsewhere in this
    file, so the original name is restored.
    """
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """Return (old_key, new_key) pairs mapping LAVIS BLIP-2 weight names to HF names.

    Bug fix: the mangled version assigned the list to a throwaway name and then
    appended to the undefined `rename_keys`; the call site in this file uses the
    name `create_rename_keys`, which is restored here.
    """
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move the entry `dct[old]` to `dct[new]` in place.

    Bug fix: the mangled version declared three parameters all named
    `UpperCamelCase__` (a SyntaxError) and wrote the popped value into a
    throwaway name instead of `dct[new]`. The call site in this file uses
    the name `rename_key`, restored here.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Fuse the per-layer q/v biases into a single qkv bias, in place.

    LAVIS stores separate `q_bias`/`v_bias` (no k bias); HF expects one
    concatenated `qkv.bias` with zeros in the k slot. Bug fix: the mangled
    version popped the biases into throwaway names and then used the
    undefined `q_bias`/`v_bias`/`qkv_bias`; the call site uses the name
    `read_in_q_v_bias`, restored here.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set the fused bias in the state dict (k bias is implicitly zero)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id=None):
    """Build a BlipaConfig (vision + text) for *model_name*.

    Returns:
        (config, image_size): the composed config and the image size the
        checkpoint was trained with (COCO-captioning checkpoints use 364).

    Bug fix: the mangled version had duplicate parameter names (SyntaxError)
    and stored every sub-config in a throwaway name; the call site in this
    file uses `get_blipa_config`, restored here.
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    else:
        # Robustness: previously an unknown name left `text_config` unbound (NameError).
        raise ValueError(f"Model name {model_name} not supported")

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert a LAVIS BLIP-2 checkpoint to the HF format, verify the converted
    weights against the original model on a demo image, and optionally save
    and/or push the result.

    Bug fixes vs the mangled version: duplicate parameter names (SyntaxError),
    nearly every intermediate result stored into a throwaway name and then read
    back via undefined identifiers, and the `blip2-flan-t5-xl-coco` branch
    computed an expected logits slice without ever asserting it.
    Names are restored from the helper call sites present in this file.
    """
    # OPT checkpoints share one tokenizer; flan-T5 checkpoints use the T5 tokenizer.
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)
    hf_model = BlipaForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
        # Bug fix: the expected slice was computed but never checked in this branch.
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type before comparing
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    # Bug fix: the parser, the choices list and the parsed args were stored in
    # throwaway names and then read back via undefined identifiers.
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 641
| 0
|
import socket
def main() -> None:
    """Connect to a server on this host (port 12312), send a greeting and
    stream the server's response into `Received_file`.

    Bug fix: the mangled version stored the socket/host/port/data in throwaway
    names and then used the undefined `sock`/`host`/`port`/`data`; the
    `__main__` guard below calls `main()`, so that name is restored.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
    # Run the file-receiving client when executed as a script.
    main()
| 703
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Bug fix: both module constants were named `UpperCamelCase__`, so the archive
# map silently shadowed the logger; upstream names are restored.
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    """Configuration for YOLOS object-detection models.

    Stores the ViT backbone hyper-parameters plus the Hungarian-matcher costs
    and loss coefficients used by the detection head.

    Bug fixes: every `__init__` parameter was named `lowercase` (duplicate
    parameter names are a SyntaxError) and every value was stored into a local
    throwaway instead of `self.*`, so the config retained nothing. The class
    name also collided with the ONNX config class below; the `model_type`
    value 'yolos' grounds the restored names.
    """

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    """ONNX export configuration for YOLOS.

    Bug fix: all three properties were named `snake_case__`, so only the last
    one survived class creation; names are restored to the `OnnxConfig`
    contract (`inputs`, `atol_for_validation`, `default_onnx_opset`). The
    class name also collided with the model config class above.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axes mapping for the exported model's inputs (batched NCHW)."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset version supported by this export."""
        return 12
| 634
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Bug fix: both module constants were named `__UpperCamelCase`, so the archive
# map silently shadowed the logger; upstream names are restored.
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    """Configuration for BERT models.

    Bug fixes: every `__init__` parameter was named `_lowerCAmelCase`
    (duplicate parameter names are a SyntaxError) and every value was stored
    into a local throwaway instead of `self.*`, so the config retained
    nothing. The class name also collided with the ONNX config class below.
    The base class is restored from the file's `PretrainedConfig` import.
    """

    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    """ONNX export configuration for BERT.

    Bug fix: the axes dicts were stored in a throwaway local and then the
    undefined `dynamic_axis` was returned; the base class is restored from
    the file's `OnnxConfig` import, and the property is named `inputs` per
    the `OnnxConfig` contract. The class name also collided with the model
    config class above.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axes mapping for the exported model's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 80
|
def UpperCamelCase(grid: list) -> int:
    """Return the minimal path sum from the top-left to the bottom-right cell
    of *grid*, moving only right or down. Mutates *grid* in place.

    Raises:
        TypeError: if the grid is empty or its first row is empty.

    Bug fix: the mangled version named its parameter `snake_case__` while the
    body reads `grid`, and stored each updated row in throwaway locals while
    reading the undefined `current_row`/`row_above`.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    # Prefix-sum the first row: there is only one way to reach each of its cells.
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]
def fill_row(current_row: list, row_above: list) -> list:
    """Update *current_row* in place so each cell holds the minimal path sum
    reachable through *row_above*, and return it.

    Bug fix: the mangled version declared two parameters both named
    `snake_case__` (a SyntaxError) while the body reads
    `current_row`/`row_above`; the caller above invokes this helper as
    `fill_row`, so that name is restored (it previously shadowed the
    min-path-sum function).
    """
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
    # Execute the module's doctests when run directly.
    import doctest
    doctest.testmod()
| 455
| 0
|
from .imports import is_rich_available

# Install rich's enhanced traceback handler when the optional `rich` package
# is available; otherwise fail loudly so the user knows what to install.
if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)
else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 710
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
# Bug fix: every value was assigned to `__A`, so `dataset`, `X`, `y`,
# `poly_reg` and `pol_reg` (all used below) were undefined.
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

# Split into training and test sets (the split is kept for parity with the tutorial).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Fit a degree-4 polynomial regression on the full dataset.
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial() -> None:
    """Plot the raw data points and the fitted polynomial-regression curve.

    Bug fix: the mangled version plotted the undefined `UpperCamelCase__`;
    the `__main__` guard below calls `viz_polymonial`, so that name is restored.
    """
    plt.scatter(X, y, color='red')
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color='blue')
    plt.title('Truth or Bluff (Linear Regression)')
    plt.xlabel('Position level')
    plt.ylabel('Salary')
    plt.show()
if __name__ == "__main__":
    # Show the fitted curve, then predict the salary for position level 5.5.
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))

    # output should be 132148.43750003
| 167
| 0
|
from numpy import exp, pi, sqrt
def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : float = 1.0 ) -> int:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
    # Execute the module's doctests when run directly.
    import doctest
    doctest.testmod()
| 32
|
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
# End-of-function markers used to truncate a generated completion after one function body.
# Bug fix: the constant was named `__a`, the same name as the two classes defined
# below, which made it unreachable; restored the upstream name.
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize the HumanEval prompts and yield each task `n_copies` times.

    Bug fixes: `__init__` stored its arguments in throwaway locals instead of
    `self.*`, and `__iter__` appended to the undefined `prompts` and read the
    undefined `outputs`. The class name is grounded by the `TokenizedDataset`
    call site later in this file; the base is restored from the file's
    `IterableDataset` import.
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Stop generation once every sequence contains an end-of-function marker
    after `start_length`.

    Bug fix: `__init__` stored its arguments in throwaway locals instead of
    `self.*`, and `__call__` appended to the undefined `done`. The class name
    is grounded by the `EndOfFunctionCriteria(...)` call site later in this
    file; the base is restored from the file's `StoppingCriteria` import.
    """

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Return True when all generated suffixes contain a stop string."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Truncate generated code at the first end-of-function marker, dropping the
    marker and everything after it.

    Bug fix: the mangled version joined the characters of its single argument
    as the separator pattern and then returned the undefined `string_list`.
    The call site in this file uses `remove_last_block`, restored here. The
    marker list is kept local so the helper is self-contained.
    """
    eof_strings = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
    string_list = re.split("(%s)" % "|".join(eof_strings), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` completions per HumanEval task and return them
    grouped as a list of lists indexed by task id.

    Bug fixes: every parameter was named `_lowercase` (duplicate parameter
    names are a SyntaxError) and all intermediates were stored in throwaway
    names while the undefined `gen_token_dict`/`generated_tokens`/... were
    read. The function name is grounded by the `complete_code` call site in
    this file; the per-batch `start_length` update is restored from the
    upstream script (NOTE(review): confirm against upstream).
    """
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # Reset the stopping criterion so it only inspects newly generated tokens.
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_sequence in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_sequence)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    """Evaluate a causal LM on OpenAI HumanEval with the `code_eval` metric.

    Bug fixes: every value (including the `os.environ` writes) was stored in a
    throwaway name, so `args`, `accelerator`, `tokenizer`, `model`,
    `gen_kwargs`, etc. were all undefined; the `__main__` guard below calls
    `main()`, so that name is restored. Environment-variable targets are
    restored from the upstream script (NOTE(review): confirm against upstream).
    """
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 30
| 0
|
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Decorator: register *key* on the wrapped function's `handle_key` list so
    the key-handler metaclass can dispatch to it.

    Bug fixes: the decorator, its inner function and the appended key all
    shared one mangled name, and the list was stored in a throwaway local
    while the undefined `handle` was mutated. Renamed to the conventional
    `mark` to resolve the triple shadowing of `UpperCAmelCase` in this module
    (NOTE(review): name taken from upstream accelerate — confirm).
    """
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator
def mark_multiple(*keys):
    """Decorator: register several keys on the wrapped function's `handle_key`
    list so the key-handler metaclass can dispatch to it.

    Bug fixes: the decorator, its inner function and the key tuple all shared
    one mangled name, and the list was stored in a throwaway local while the
    undefined `handle` was mutated. Renamed to the conventional
    `mark_multiple` to resolve the shadowing of `UpperCAmelCase`
    (NOTE(review): name taken from upstream accelerate — confirm).
    """
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    """Metaclass that collects methods carrying a `handle_key` attribute into a
    per-class `key_handler` table and installs a `handle_input` dispatcher.

    Bug fixes: the body read the undefined `snake_case__` names, and the class
    itself was renamed away from `KeyHandler` even though this module still
    references `KeyHandler.handle_input` and `KeyHandler(...)`. The base is
    `type`: `super().__new__(cls, name, bases, attrs)` is the metaclass
    protocol.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key press and dispatch to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            # NOTE(review): the mangled source assigned to a throwaway here;
            # upstream accelerate records the pressed key on the class — confirm.
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def UpperCAmelCase(cls):
    """Recreate *cls* through the `KeyHandler` metaclass so any decorated
    methods get registered in its `key_handler` table.

    Bug fix: the body already references `KeyHandler` (which the mangled file
    never defined under that name) and stored nothing usable; behavior is
    otherwise unchanged.
    """
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 706
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
# Module logger (transformers-style logging wrapper, not the stdlib logger).
snake_case_ : str =logging.get_logger(__name__)
# Canonical names the tokenizer class below references. The original file bound
# all four maps to the same rebound name, so only the last survived.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# All checkpoints share the same hub URL layout: .../resolve/main/<file>.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        # was ".../aresolve/..." — a broken URL path
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        # was "tokenizer.jsont" — a broken file name
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        # was ".../aresolve/..." — a broken URL path
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length supported by each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}

# Preserve the last value bound to the old rebound module name.
snake_case_ = PRETRAINED_INIT_CONFIGURATION
class a__(PreTrainedTokenizerFast):
    r"""Fast REALM tokenizer (backed by HuggingFace *tokenizers*), based on WordPiece.

    The original definition could not parse (duplicate keyword parameter names)
    and its method bodies referenced names that were never bound
    (`normalizer_state`, `batch_text_pair`, `output_data`, `output`, ...);
    this restores the intended behavior with the canonical attribute/method
    names the PreTrainedTokenizerFast machinery expects.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-configure the backend normalizer if the requested options differ
        # from what the serialized tokenizer was saved with.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate lists to a rectangular, max-length-padded shape.

        Args:
            text: list of candidate-text lists (one list per example).
            text_pair (in kwargs): optional matching list of pair texts.
            return_tensors (in kwargs): applied once at the end on the batch.

        Returns:
            BatchEncoding with entries shaped [num_candidates, max_length].
        """
        # Always pad to max_length so every candidate set has the same width.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            # Tensor conversion is deferred to the final BatchEncoding below.
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        # Drop keys the underlying tokenizer did not produce.
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] — or, for pairs, [CLS] A [SEP] B [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Token type ids: 0s for the first sequence (+ specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend vocabulary files and return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 205
| 0
|
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list) -> None:
    """Write `articles` to `path`, newline-separated.

    The original wrote the raw list instead of the joined text and leaked the
    file handle; this writes the joined content via a context manager.
    """
    content = "\n".join(articles)
    with Path(path).open("w") as f:
        f.write(content)


# Backward-compatible alias for the old module-level name.
snake_case_ = _dump_articles

# Tiny checkpoints used by the tests below (these names are what the test
# class references).
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
A_ = stream_handler  # preserve the old rebound module name's final value
class __UpperCAmelCase(TestCasePlus):
    """End-to-end tests for run_eval.py / run_eval_search.py on tiny models.

    The original class inherited an undefined name, referenced unbound locals
    (`model`, `testargs`, ...), and its test methods were not discoverable by
    unittest; this restores the intended structure.
    """

    def run_eval_tester(self, model):
        # One-article source file and a fresh output path per run.
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    @parameterized.expand([T5_TINY, BART_TINY])
    def test_run_eval(self, model):
        self.run_eval_tester(model)

    # test one model to quickly (no-@slow) catch simple problems and do an
    # extensive testing of functionality with multiple models as @slow separately
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            # The search table must mention the swept args and the model; the
            # "Info" log prefix must have been silenced.
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 274
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger used by the conversion functions below; the original bound it
# to a throwaway name, leaving every later `logger.info(...)` call undefined.
logger = logging.get_logger(__name__)
A_ = logger  # preserve the old module-level name as well
def get_detr_config(model_name):
    """Build the DetrConfig for a given checkpoint name.

    Returns:
        (config, is_panoptic): the HF config and whether the checkpoint is a
        panoptic-segmentation variant.

    Raises:
        ValueError: if the model name names neither resnet-50 nor resnet-101.
    """
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    # NOTE(review): the original passed the model name as `use_timm_backbone`;
    # with an explicit HF backbone config this should be False.
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        # JSON object keys are strings; the config expects int ids.
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic


snake_case_ = get_detr_config  # backward-compatible alias for the old name
def create_rename_keys(config):
    """Return (original_key, hf_key) pairs mapping a torch-hub DETR state dict
    onto the HuggingFace DetrForObjectDetection layout.

    The original bound the list to a throwaway name, so every append crashed.
    """
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []

    # stem
    rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
    for param in ("weight", "bias", "running_mean", "running_var"):
        rename_keys.append(
            (f"backbone.0.body.bn1.{param}", f"backbone.conv_encoder.model.embedder.embedder.normalization.{param}")
        )

    # stages
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            # shortcut (only the first layer of a stage downsamples)
            if layer_idx == 0:
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
                    )
                )
                for param in ("weight", "bias", "running_mean", "running_var"):
                    rename_keys.append(
                        (
                            f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.{param}",
                            f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.{param}",
                        )
                    )
            # 3 convs, each followed by a batch norm
            for i in range(3):
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i + 1}.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
                    )
                )
                for param in ("weight", "bias", "running_mean", "running_var"):
                    rename_keys.append(
                        (
                            f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i + 1}.{param}",
                            f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.{param}",
                        )
                    )

    for i in range(config.encoder_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ]
    )

    return rename_keys


snake_case_ = create_rename_keys  # backward-compatible alias for the old name
def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place.

    The original popped the value but never reinserted it, silently dropping
    every renamed weight.
    """
    val = state_dict.pop(old)
    state_dict[new] = val


snake_case_ = rename_key  # backward-compatible alias for the old name
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each fused in_proj weight/bias into separate q/k/v projections, in place.

    In PyTorch's MultiHeadAttention q, k and v share one (3*dim, dim) matrix;
    the HF model stores them separately. The original lost every
    ``state_dict[...] = `` target, so nothing was ever written back.
    """
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # next: transformer decoder (a bit more complex because it also has cross-attention)
    for i in range(6):
        # self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


snake_case_ = read_in_q_k_v  # backward-compatible alias for the old name
def prepare_img():
    """Download and return the standard COCO cats verification image.

    The original passed the URL itself as the `stream` argument; streaming must
    be enabled (stream=True) for `.raw` to be readable by PIL.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


snake_case_ = prepare_img  # backward-compatible alias for the old name
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert a torch-hub DETR checkpoint to the HuggingFace format, verify it
    against the original model on a test image, and optionally save/push it.

    The original body lost nearly all assignment targets (`config`,
    `state_dict`, `val`, ...); this restores the intended flow.
    """
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load(
        "facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True
    ).eval()
    state_dict = detr.state_dict()

    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as
    # the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    img_format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=img_format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")


snake_case_ = convert_detr_checkpoint  # backward-compatible alias for the old name
if __name__ == "__main__":
    # CLI entry point. The original never bound `parser`/`args`, so every line
    # below the first crashed with NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    # `snake_case_` is, at this point in the module, the conversion entry point
    # defined directly above.
    snake_case_(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 274
| 1
|
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place with the (deliberately
    inefficient) slowsort algorithm.

    Args:
        sequence: list to sort in place.
        start: first index of the range (defaults to 0).
        end: last index of the range (defaults to len(sequence) - 1).

    The original recursed through an undefined name and destroyed the element
    swap; both are restored here.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    # Move the larger of the two half-maxima to the end, then sort the rest.
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


lowerCAmelCase__ = slowsort  # backward-compatible alias for the old name
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 626
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
# Guard the heavy TF imports so this module can be imported without TensorFlow.
if is_tf_available():
    import numpy as np
    import tensorflow as tf
    from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class a__(unittest.TestCase):
    """Integration test: TFCamembert base model output shape and values."""

    @slow
    def test_output_embeds_base_model(self):
        # NOTE(review): renamed from a non-`test_`-prefixed name so unittest
        # discovery actually runs it; the body also referenced an unbound
        # `lowercase` and nonexistent dtypes `tf.intaa`/`tf.floataa`.
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 626
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module logger (transformers-style logging wrapper).
UpperCamelCase_ = logging.get_logger(__name__)
# PIL is an optional dependency; only import it when the vision extras exist.
if is_vision_available():
    import PIL
class __SCREAMING_SNAKE_CASE(BaseImageProcessor):
    r"""CLIP-style image processor: resize -> center crop -> rescale -> normalize.

    The original could not parse (every method reused one parameter name
    several times) and ``__init__`` assigned the settings to throwaway locals
    instead of the instance, leaving the processor unconfigured.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        # Store the validated settings on the instance.
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize so the shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here resolves to the module-level image_transforms helper.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Normalize with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def lowerCamelCase_ ( self : Tuple , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : int = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs : Any , ):
    """Preprocess one image or a batch: optional RGB conversion, resize,
    center-crop, rescale and normalization, packed into a ``BatchFeature``
    under the ``"pixel_values"`` key.

    Every ``do_*``/value argument falls back to the corresponding ``self``
    attribute when ``None``.

    Raises:
        ValueError: on invalid image types, or when a step is enabled but its
            required parameter is missing.
    """
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    # default_to_square flags were lost to obfuscation; False for the
    # shortest-edge resize size, True for the square crop size — confirm upstream.
    size = get_size_dict(size , param_name='''size''' , default_to_square=False )
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size , param_name='''crop_size''' , default_to_square=True )
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
    images = make_list_of_images(images )
    if not valid_images(images ):
        raise ValueError(
            '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
            '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
    if do_resize and size is None:
        raise ValueError('''Size must be specified if do_resize is True.''' )
    if do_center_crop and crop_size is None:
        raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
    if do_rescale and rescale_factor is None:
        raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
    # PIL RGBA images are converted to RGB
    if do_convert_rgb:
        images = [convert_to_rgb(image ) for image in images]
    # All transformations expect numpy arrays.
    images = [to_numpy_array(image ) for image in images]
    if do_resize:
        images = [self.resize(image=image , size=size , resample=resample ) for image in images]
    if do_center_crop:
        images = [self.center_crop(image=image , size=crop_size ) for image in images]
    if do_rescale:
        images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
    if do_normalize:
        images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
    images = [to_channel_dimension_format(image , data_format ) for image in images]
    data = {'''pixel_values''': images}
    return BatchFeature(data=data , tensor_type=return_tensors )
| 92
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( UpperCAmelCase_ ):
    """Processor bundling a CLIP image processor and a CLIP tokenizer.

    ``__call__`` tokenizes ``text`` and/or preprocesses ``images`` and merges
    the results; ``batch_decode``/``decode`` forward to the tokenizer.
    """

    # ProcessorMixin configuration. NOTE(review): the obfuscated original bound
    # all three values to the single name ``_lowerCamelCase`` (each assignment
    # overwriting the previous); the standard attribute names are restored here.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__( self : Tuple , image_processor : Any = None , tokenizer : Any = None , **kwargs : Any ):
        """Store the image processor and tokenizer; the deprecated
        ``feature_extractor`` kwarg is accepted as a fallback for
        ``image_processor``."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )

    def __call__( self : Optional[Any] , text : Any = None , images : Any = None , return_tensors : Any = None , **kwargs : Any ):
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns the tokenizer encoding (with ``pixel_values`` merged in when
        images are also given), or a ``BatchEncoding`` holding only image
        features.

        Raises:
            ValueError: if both ``text`` and ``images`` are ``None``.
        """
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # Merge the image features into the text encoding.
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    # NOTE(review): the five members below all carried the obfuscated name
    # ``__snake_case`` in the original, so only the last binding survives at
    # runtime; the names are kept to avoid changing the (already broken)
    # external interface — restore the upstream names when de-obfuscating.
    def __snake_case ( self : List[str] , *args : Any , **kwargs : Any ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def __snake_case ( self : int , *args : Any , **kwargs : Any ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def __snake_case ( self : str ):
        """Order-preserving union of tokenizer and image-processor input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def __snake_case ( self : Optional[int] ):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class

    @property
    def __snake_case ( self : List[Any] ):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 51
| 0
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __lowerCAmelCase( UpperCamelCase__ ):
    """Dataclass output container for the decoder path below.

    NOTE(review): obfuscation reduced the single field to a bare literal;
    presumably this was an annotated tensor field upstream — confirm before use.
    """

    # Same value as the original ``4_2`` literal, written without the separator.
    __snake_case : Optional[Any] = 42
class __lowerCAmelCase( nn.Module ):
    """Convolutional encoder: conv_in -> down blocks -> UNet mid block ->
    GroupNorm/SiLU/conv_out (VAE-encoder shape).

    NOTE(review): this file is machine-obfuscated and does not run as-is —
    every parameter is named ``SCREAMING_SNAKE_CASE`` (duplicate argument
    names are a SyntaxError), bodies read names never bound in scope
    (``__A``, ``layers_per_block``, ``x``, ``out_channels``, ``double_z``),
    and results are bound to the local ``SCREAMING_SNAKE_CASE_`` instead of
    the ``self.*`` attributes later read (``self.down_blocks``,
    ``self.conv_in``, ...). Code left byte-identical; comments only.
    """

    def __init__( self : int , SCREAMING_SNAKE_CASE : str=3 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : str=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE : Tuple=(64,) , SCREAMING_SNAKE_CASE : List[Any]=2 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : str="silu" , SCREAMING_SNAKE_CASE : Optional[int]=True , ):
        """Build conv_in, the down-block chain, the mid block and the output head.

        The defaults suggest the original parameters were (in_channels=3,
        out_channels=3, down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,), layers_per_block=2, norm_num_groups=32,
        act_fn="silu", double_z=True) — confirm against upstream.
        """
        super().__init__()
        SCREAMING_SNAKE_CASE_ :int = layers_per_block
        # 3x3 stride-1 input convolution into the first block's channel width
        SCREAMING_SNAKE_CASE_ :Optional[Any] = torch.nn.Convad(
            __A , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        SCREAMING_SNAKE_CASE_ :Optional[int] = None
        SCREAMING_SNAKE_CASE_ :Union[str, Any] = nn.ModuleList([] )
        # down
        SCREAMING_SNAKE_CASE_ :Tuple = block_out_channels[0]
        for i, down_block_type in enumerate(__A ):
            SCREAMING_SNAKE_CASE_ :str = output_channel
            SCREAMING_SNAKE_CASE_ :Dict = block_out_channels[i]
            # the final block adds no downsampler
            SCREAMING_SNAKE_CASE_ :Any = i == len(__A ) - 1
            SCREAMING_SNAKE_CASE_ :str = get_down_block(
                __A , num_layers=self.layers_per_block , in_channels=__A , out_channels=__A , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=__A , resnet_groups=__A , attention_head_dim=__A , temb_channels=__A , )
            self.down_blocks.append(__A )
        # mid
        SCREAMING_SNAKE_CASE_ :List[Any] = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__A , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=__A , temb_channels=__A , )
        # out
        SCREAMING_SNAKE_CASE_ :Optional[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__A , eps=1E-6 )
        SCREAMING_SNAKE_CASE_ :Any = nn.SiLU()
        # channel count is doubled when double_z is set (presumably mean+logvar)
        SCREAMING_SNAKE_CASE_ :Any = 2 * out_channels if double_z else out_channels
        SCREAMING_SNAKE_CASE_ :Union[str, Any] = nn.Convad(block_out_channels[-1] , __A , 3 , padding=1 )
        SCREAMING_SNAKE_CASE_ :Optional[Any] = False

    def _lowercase ( self : str , SCREAMING_SNAKE_CASE : int ):
        """Encode the input through conv_in, the down blocks and the mid block,
        then normalize/activate/project; uses torch.utils.checkpoint when
        training with gradient checkpointing enabled."""
        SCREAMING_SNAKE_CASE_ :Tuple = x
        SCREAMING_SNAKE_CASE_ :Optional[Any] = self.conv_in(__A )
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(SCREAMING_SNAKE_CASE : str ):
                # wrap a module so torch.utils.checkpoint can re-run it
                def custom_forward(*SCREAMING_SNAKE_CASE : str ):
                    return module(*__A )

                return custom_forward

            # down
            if is_torch_version('>=' , '1.11.0' ):
                # newer torch exposes the use_reentrant flag
                for down_block in self.down_blocks:
                    SCREAMING_SNAKE_CASE_ :Optional[Any] = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(__A ) , __A , use_reentrant=__A )
                # middle
                SCREAMING_SNAKE_CASE_ :int = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , __A , use_reentrant=__A )
            else:
                for down_block in self.down_blocks:
                    SCREAMING_SNAKE_CASE_ :Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(__A ) , __A )
                # middle
                SCREAMING_SNAKE_CASE_ :Union[str, Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __A )
        else:
            # down
            for down_block in self.down_blocks:
                SCREAMING_SNAKE_CASE_ :Any = down_block(__A )
            # middle
            SCREAMING_SNAKE_CASE_ :List[Any] = self.mid_block(__A )
        # post-process
        SCREAMING_SNAKE_CASE_ :Optional[Any] = self.conv_norm_out(__A )
        SCREAMING_SNAKE_CASE_ :Dict = self.conv_act(__A )
        SCREAMING_SNAKE_CASE_ :Optional[Any] = self.conv_out(__A )
        return sample
class __lowerCAmelCase( nn.Module ):
    """Convolutional decoder: conv_in -> UNet mid block -> up blocks ->
    (Spatial)Norm/SiLU/conv_out (VAE-decoder shape).

    NOTE(review): machine-obfuscated like the encoder above — duplicate
    ``SCREAMING_SNAKE_CASE`` parameter names (SyntaxError), unbound body names
    (``__A``, ``layers_per_block``, ``in_channels``, ``norm_type``, ``z``,
    ``latent_embeds``), and locals bound to ``SCREAMING_SNAKE_CASE_`` instead
    of ``self.*``. Code left byte-identical; comments only.
    """

    def __init__( self : Dict , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : Optional[int]=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE : int=(64,) , SCREAMING_SNAKE_CASE : int=2 , SCREAMING_SNAKE_CASE : Dict=32 , SCREAMING_SNAKE_CASE : Dict="silu" , SCREAMING_SNAKE_CASE : str="group" , ):
        """Build conv_in, the mid block, the reversed-channel up-block chain and
        the output head; ``norm_type == 'spatial'`` selects SpatialNorm over
        GroupNorm for the output normalization."""
        super().__init__()
        SCREAMING_SNAKE_CASE_ :List[str] = layers_per_block
        SCREAMING_SNAKE_CASE_ :Optional[Any] = nn.Convad(
            __A , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        SCREAMING_SNAKE_CASE_ :Optional[int] = None
        SCREAMING_SNAKE_CASE_ :str = nn.ModuleList([] )
        # temb channels only exist for the 'spatial' norm variant
        SCREAMING_SNAKE_CASE_ :Tuple = in_channels if norm_type == 'spatial' else None
        # mid
        SCREAMING_SNAKE_CASE_ :Optional[Any] = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__A , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__A , temb_channels=__A , )
        # up
        SCREAMING_SNAKE_CASE_ :Optional[Any] = list(reversed(__A ) )
        SCREAMING_SNAKE_CASE_ :List[Any] = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(__A ):
            SCREAMING_SNAKE_CASE_ :Dict = output_channel
            SCREAMING_SNAKE_CASE_ :List[str] = reversed_block_out_channels[i]
            # the final up block adds no upsampler
            SCREAMING_SNAKE_CASE_ :Tuple = i == len(__A ) - 1
            SCREAMING_SNAKE_CASE_ :Any = get_up_block(
                __A , num_layers=self.layers_per_block + 1 , in_channels=__A , out_channels=__A , prev_output_channel=__A , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=__A , resnet_groups=__A , attention_head_dim=__A , temb_channels=__A , resnet_time_scale_shift=__A , )
            self.up_blocks.append(__A )
            SCREAMING_SNAKE_CASE_ :Any = output_channel
        # out
        if norm_type == "spatial":
            SCREAMING_SNAKE_CASE_ :int = SpatialNorm(block_out_channels[0] , __A )
        else:
            SCREAMING_SNAKE_CASE_ :List[Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__A , eps=1E-6 )
        SCREAMING_SNAKE_CASE_ :Union[str, Any] = nn.SiLU()
        SCREAMING_SNAKE_CASE_ :Any = nn.Convad(block_out_channels[0] , __A , 3 , padding=1 )
        SCREAMING_SNAKE_CASE_ :Optional[Any] = False

    def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict=None ):
        """Decode a latent through conv_in, the mid block and the up blocks
        (optionally conditioned on latent embeddings), then
        normalize/activate/project; supports gradient checkpointing."""
        SCREAMING_SNAKE_CASE_ :Any = z
        SCREAMING_SNAKE_CASE_ :Optional[int] = self.conv_in(__A )
        # cast intermediate activations to the up-blocks' parameter dtype
        SCREAMING_SNAKE_CASE_ :List[str] = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(SCREAMING_SNAKE_CASE : str ):
                # wrap a module so torch.utils.checkpoint can re-run it
                def custom_forward(*SCREAMING_SNAKE_CASE : Dict ):
                    return module(*__A )

                return custom_forward

            if is_torch_version('>=' , '1.11.0' ):
                # middle
                SCREAMING_SNAKE_CASE_ :Union[str, Any] = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , __A , __A , use_reentrant=__A )
                SCREAMING_SNAKE_CASE_ :int = sample.to(__A )
                # up
                for up_block in self.up_blocks:
                    SCREAMING_SNAKE_CASE_ :List[str] = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(__A ) , __A , __A , use_reentrant=__A )
            else:
                # middle
                SCREAMING_SNAKE_CASE_ :int = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , __A , __A )
                SCREAMING_SNAKE_CASE_ :Dict = sample.to(__A )
                # up
                for up_block in self.up_blocks:
                    SCREAMING_SNAKE_CASE_ :Union[str, Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(__A ) , __A , __A )
        else:
            # middle
            SCREAMING_SNAKE_CASE_ :str = self.mid_block(__A , __A )
            SCREAMING_SNAKE_CASE_ :Tuple = sample.to(__A )
            # up
            for up_block in self.up_blocks:
                SCREAMING_SNAKE_CASE_ :Tuple = up_block(__A , __A )
        # post-process
        if latent_embeds is None:
            SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.conv_norm_out(__A )
        else:
            SCREAMING_SNAKE_CASE_ :Optional[Any] = self.conv_norm_out(__A , __A )
        SCREAMING_SNAKE_CASE_ :List[str] = self.conv_act(__A )
        SCREAMING_SNAKE_CASE_ :Any = self.conv_out(__A )
        return sample
class __lowerCAmelCase( nn.Module ):
    """Vector-quantization layer: maps inputs to their nearest codebook
    embedding (nearest neighbour over ``self.embedding.weight``), with an
    optional remapping of indices onto a reduced "used" index set.

    NOTE(review): machine-obfuscated — ``__init__`` repeats the parameter
    name ``SCREAMING_SNAKE_CASE`` (duplicate-argument SyntaxError), bodies
    reference unbound names (``__A``, ``n_e``, ``vq_embed_dim``, ``inds``,
    ``z`` ...), locals go to ``SCREAMING_SNAKE_CASE_`` instead of ``self.*``,
    and all four methods share the name ``_lowercase`` so only the last
    binding would survive. Code left byte-identical; comments only.
    """

    def __init__( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : Optional[Any]="random" , SCREAMING_SNAKE_CASE : int=False , SCREAMING_SNAKE_CASE : Any=True ):
        """Set up the codebook embedding (uniform init in ±1/n_e) and, when a
        ``remap`` file is given, the buffer of used indices plus the policy for
        unknown indices ("random", "extra", or a fixed integer)."""
        super().__init__()
        SCREAMING_SNAKE_CASE_ :str = n_e
        SCREAMING_SNAKE_CASE_ :str = vq_embed_dim
        SCREAMING_SNAKE_CASE_ :Optional[int] = beta
        SCREAMING_SNAKE_CASE_ :Union[str, Any] = legacy
        SCREAMING_SNAKE_CASE_ :Optional[Any] = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        SCREAMING_SNAKE_CASE_ :Tuple = remap
        if self.remap is not None:
            # remap is a path to a numpy file of used codebook indices
            self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
            SCREAMING_SNAKE_CASE_ :Optional[Any] = self.used.shape[0]
            SCREAMING_SNAKE_CASE_ :List[Any] = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                SCREAMING_SNAKE_CASE_ :int = self.re_embed
                SCREAMING_SNAKE_CASE_ :List[Any] = self.re_embed + 1
            print(
                f'Remapping {self.n_e} indices to {self.re_embed} indices. '
                f'Using {self.unknown_index} for unknown indices.' )
        else:
            SCREAMING_SNAKE_CASE_ :Tuple = n_e
        SCREAMING_SNAKE_CASE_ :int = sane_index_shape

    def _lowercase ( self : str , SCREAMING_SNAKE_CASE : List[str] ):
        """Map raw codebook indices onto positions in the reduced "used" set;
        indices not present become a random index or ``unknown_index``."""
        SCREAMING_SNAKE_CASE_ :Optional[int] = inds.shape
        assert len(__A ) > 1
        SCREAMING_SNAKE_CASE_ :Dict = inds.reshape(ishape[0] , -1 )
        SCREAMING_SNAKE_CASE_ :List[Any] = self.used.to(__A )
        SCREAMING_SNAKE_CASE_ :int = (inds[:, :, None] == used[None, None, ...]).long()
        SCREAMING_SNAKE_CASE_ :Dict = match.argmax(-1 )
        # positions with no match in the used set
        SCREAMING_SNAKE_CASE_ :int = match.sum(2 ) < 1
        if self.unknown_index == "random":
            SCREAMING_SNAKE_CASE_ :Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.unknown_index
        return new.reshape(__A )

    def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE : str ):
        """Inverse remapping: map reduced indices back to full codebook indices."""
        SCREAMING_SNAKE_CASE_ :Optional[Any] = inds.shape
        assert len(__A ) > 1
        SCREAMING_SNAKE_CASE_ :str = inds.reshape(ishape[0] , -1 )
        SCREAMING_SNAKE_CASE_ :Optional[int] = self.used.to(__A )
        if self.re_embed > self.used.shape[0]:  # extra token
            SCREAMING_SNAKE_CASE_ :Optional[Any] = 0  # simply set to zero
        SCREAMING_SNAKE_CASE_ :int = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __A )
        return back.reshape(__A )

    def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
        """Quantize ``z``: pick the nearest codebook entry per position, compute
        the commitment loss (term order depends on ``legacy``; weighted by
        ``beta``) and apply a straight-through estimator so gradients flow."""
        # reshape to channels-last and flatten to (N, vq_embed_dim)
        SCREAMING_SNAKE_CASE_ :str = z.permute(0 , 2 , 3 , 1 ).contiguous()
        SCREAMING_SNAKE_CASE_ :int = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        SCREAMING_SNAKE_CASE_ :Dict = torch.argmin(torch.cdist(__A , self.embedding.weight ) , dim=1 )
        SCREAMING_SNAKE_CASE_ :List[str] = self.embedding(__A ).view(z.shape )
        SCREAMING_SNAKE_CASE_ :str = None
        SCREAMING_SNAKE_CASE_ :Union[str, Any] = None
        # compute loss for embedding
        if not self.legacy:
            SCREAMING_SNAKE_CASE_ :Optional[int] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            SCREAMING_SNAKE_CASE_ :Any = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients
        SCREAMING_SNAKE_CASE_ :List[str] = z + (z_q - z).detach()
        # reshape back to match original input shape
        SCREAMING_SNAKE_CASE_ :int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            SCREAMING_SNAKE_CASE_ :List[str] = min_encoding_indices.reshape(z.shape[0] , -1 )  # add batch axis
            SCREAMING_SNAKE_CASE_ :int = self.remap_to_used(__A )
            SCREAMING_SNAKE_CASE_ :Union[str, Any] = min_encoding_indices.reshape(-1 , 1 )  # flatten
        if self.sane_index_shape:
            SCREAMING_SNAKE_CASE_ :Optional[int] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ):
        """Fetch codebook vectors for given indices (un-remapping first when a
        remap is configured) and reshape to ``shape`` in channels-first layout."""
        if self.remap is not None:
            SCREAMING_SNAKE_CASE_ :Dict = indices.reshape(shape[0] , -1 )  # add batch axis
            SCREAMING_SNAKE_CASE_ :Optional[Any] = self.unmap_to_all(__A )
            SCREAMING_SNAKE_CASE_ :Optional[int] = indices.reshape(-1 )  # flatten again
        # get quantized latent vectors
        SCREAMING_SNAKE_CASE_ :Optional[Any] = self.embedding(__A )
        if shape is not None:
            SCREAMING_SNAKE_CASE_ :int = z_q.view(__A )
            # reshape back to match original input shape
            SCREAMING_SNAKE_CASE_ :str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q
class __lowerCAmelCase( UpperCamelCase__ ):
    """Diagonal Gaussian over latents, parameterised by channel-concatenated
    (mean, logvar); supports reparameterised sampling, KL, NLL and mode.

    NOTE(review): machine-obfuscated — the ``torch.chunk`` line annotates a
    tuple assignment target (a SyntaxError), bodies reference unbound names
    (``__A``, ``parameters``, ``deterministic``, ``other``, ``sample``), and
    the four methods all share the name ``_lowercase`` (only the last binding
    would survive). Code left byte-identical; comments only.
    """

    def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int=False ):
        """Split the parameter tensor into mean/logvar along dim 1, clamp the
        logvar into [-30, 20], and zero std/var when deterministic."""
        SCREAMING_SNAKE_CASE_ :Union[str, Any] = parameters
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :List[Any] = torch.chunk(__A , 2 , dim=1 )
        SCREAMING_SNAKE_CASE_ :Dict = torch.clamp(self.logvar , -30.0 , 20.0 )
        SCREAMING_SNAKE_CASE_ :int = deterministic
        SCREAMING_SNAKE_CASE_ :int = torch.exp(0.5 * self.logvar )
        SCREAMING_SNAKE_CASE_ :Optional[Any] = torch.exp(self.logvar )
        if self.deterministic:
            # deterministic distribution: no stochasticity, std/var forced to 0
            SCREAMING_SNAKE_CASE_ :Any = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )

    def _lowercase ( self : Any , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ):
        """Reparameterised sample: ``mean + std * eps`` with eps drawn on the
        parameters' device/dtype."""
        SCREAMING_SNAKE_CASE_ :Optional[int] = randn_tensor(
            self.mean.shape , generator=__A , device=self.parameters.device , dtype=self.parameters.dtype )
        SCREAMING_SNAKE_CASE_ :Optional[int] = self.mean + self.std * sample
        return x

    def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : int=None ):
        """KL divergence to a standard normal (when ``other`` is None) or to
        another diagonal Gaussian; zero when deterministic."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )

    def _lowercase ( self : str , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=[1, 2, 3] ):
        """Negative log-likelihood of a sample under this Gaussian, summed over
        the given dims. NOTE(review): the mutable list default is shared
        across calls — harmless only because it is never mutated here."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        SCREAMING_SNAKE_CASE_ :Optional[Any] = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__A )

    def _lowercase ( self : Tuple ):
        """Distribution mode — equal to the mean for a Gaussian."""
        return self.mean
| 715
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Logger and constants for the SentencePiece (BARThez-style) tokenizer below.
# NOTE(review): obfuscation reuses the single name ``SCREAMING_SNAKE_CASE__``
# for five distinct constants (logger, vocab-file names, pretrained vocab map,
# max model input sizes, word-boundary marker); each assignment overwrites the
# previous one, so only the final assignment survives at module scope.
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)

# mapping from vocab-file kind to on-disk filename
SCREAMING_SNAKE_CASE__ : str = {"""vocab_file""": """sentencepiece.bpe.model"""}

# hub URLs of the pretrained sentencepiece models
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
    """vocab_file""": {
        """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
        """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
        """moussaKam/barthez-orangesum-title""": (
            """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
        ),
    },
}

# maximum positional-embedding size per pretrained checkpoint
SCREAMING_SNAKE_CASE__ : Any = {
    """moussaKam/mbarthez""": 10_24,
    """moussaKam/barthez""": 10_24,
    """moussaKam/barthez-orangesum-title""": 10_24,
}

# sentencepiece word-boundary marker character
SCREAMING_SNAKE_CASE__ : int = """▁"""
class __lowerCAmelCase( lowerCAmelCase__ ):
    """SentencePiece BPE tokenizer (BARThez-style): ``<s>``/``<pad>``/``</s>``/
    ``<unk>`` are pinned to fairseq ids 0-3 and all remaining ids come from the
    sentencepiece model.

    NOTE(review): machine-obfuscated — the four class attributes below all
    share the name ``__snake_case`` (each overwrites the previous), method
    parameter lists repeat ``SCREAMING_SNAKE_CASE`` (a duplicate-argument
    SyntaxError in ``__init__``/``save_vocabulary``), and bodies read unbound
    names (``mask_token``, ``sp_model_kwargs``, ``vocab_file``, ``tokens``
    ...). Code left byte-identical; comments only.
    """

    __snake_case : Union[str, Any] = VOCAB_FILES_NAMES
    __snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    __snake_case : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __snake_case : str = ['input_ids', 'attention_mask']

    def __init__( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str]="<s>" , SCREAMING_SNAKE_CASE : str="</s>" , SCREAMING_SNAKE_CASE : Dict="</s>" , SCREAMING_SNAKE_CASE : Any="<s>" , SCREAMING_SNAKE_CASE : Dict="<unk>" , SCREAMING_SNAKE_CASE : Any="<pad>" , SCREAMING_SNAKE_CASE : Optional[Any]="<mask>" , SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE : str , ):
        """Load the sentencepiece model from the vocab file, register the
        special tokens (the mask token is wrapped in an AddedToken when given
        as a plain string) and pin the four fairseq special-token ids."""
        SCREAMING_SNAKE_CASE_ :List[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
        SCREAMING_SNAKE_CASE_ :Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
        SCREAMING_SNAKE_CASE_ :Optional[int] = vocab_file
        SCREAMING_SNAKE_CASE_ :List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) )
        # fairseq pins these four ids ahead of the sentencepiece vocabulary
        SCREAMING_SNAKE_CASE_ :Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        SCREAMING_SNAKE_CASE_ :Optional[int] = len(self.sp_model ) - 1
        SCREAMING_SNAKE_CASE_ :Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
        """Build model inputs with special tokens: ``<s> A </s>`` for a single
        sequence, ``<s> A </s></s> B </s>`` for a pair."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        SCREAMING_SNAKE_CASE_ :str = [self.cls_token_id]
        SCREAMING_SNAKE_CASE_ :str = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : bool = False ):
        """Return a mask with 1 for special tokens and 0 for sequence tokens;
        defers to the superclass when the ids already contain special tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
        if token_ids_a is None:
            return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
        return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]

    def _lowercase ( self : Any , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
        """Token-type ids for sequence pairs: this model uses all zeros."""
        SCREAMING_SNAKE_CASE_ :Optional[Any] = [self.sep_token_id]
        SCREAMING_SNAKE_CASE_ :Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def _lowercase ( self : Any ):
        """Vocabulary size, as reported by the sentencepiece model."""
        return len(self.sp_model )

    def _lowercase ( self : Dict ):
        """Return the full token->id vocabulary, including added tokens."""
        SCREAMING_SNAKE_CASE_ :List[Any] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : str ):
        """Tokenize text into sentencepiece pieces."""
        return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )

    def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : List[Any] ):
        """Convert a token to its id: fairseq-pinned tokens first, then the
        sentencepiece model (unknown pieces map to the unk id)."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        SCREAMING_SNAKE_CASE_ :Any = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE )
        # sentencepiece returns 0 for unknown pieces; fall back to unk_token_id
        return spm_id if spm_id else self.unk_token_id

    def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE : Tuple ):
        """Convert an id back to its token, honouring the fairseq-pinned ids."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE )

    def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Any ):
        """Join tokens back into a string, decoding runs of ordinary pieces
        with sentencepiece and splicing special tokens through verbatim."""
        SCREAMING_SNAKE_CASE_ :Optional[int] = []
        SCREAMING_SNAKE_CASE_ :Tuple = ''
        SCREAMING_SNAKE_CASE_ :int = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE ) + token
                SCREAMING_SNAKE_CASE_ :Dict = True
                SCREAMING_SNAKE_CASE_ :int = []
            else:
                current_sub_tokens.append(SCREAMING_SNAKE_CASE )
                SCREAMING_SNAKE_CASE_ :int = False
        out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE )
        return out_string.strip()

    def __getstate__( self : List[Any] ):
        """Drop the (unpicklable) sentencepiece processor before pickling."""
        SCREAMING_SNAKE_CASE_ :Dict = self.__dict__.copy()
        SCREAMING_SNAKE_CASE_ :List[Any] = None
        return state

    def __setstate__( self : int , SCREAMING_SNAKE_CASE : Union[str, Any] ):
        """Restore state and reload the sentencepiece model from the vocab file."""
        SCREAMING_SNAKE_CASE_ :List[Any] = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            SCREAMING_SNAKE_CASE_ :List[Any] = {}
        SCREAMING_SNAKE_CASE_ :Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ):
        """Save the sentencepiece vocab into ``save_directory``: copy the file
        when it exists on disk, otherwise serialise the loaded model."""
        if not os.path.isdir(SCREAMING_SNAKE_CASE ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        SCREAMING_SNAKE_CASE_ :int = os.path.join(
            SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
        elif not os.path.isfile(self.vocab_file ):
            with open(SCREAMING_SNAKE_CASE , 'wb' ) as fi:
                SCREAMING_SNAKE_CASE_ :List[Any] = self.sp_model.serialized_model_proto()
                fi.write(SCREAMING_SNAKE_CASE )
        return (out_vocab_file,)
| 233
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import table for the MobileViT model family: heavy submodules are only
# imported on first attribute access (or eagerly under TYPE_CHECKING so static
# analysers see the real symbols).
# NOTE(review): the obfuscated original bound every sub-list to the throwaway
# name ``_lowerCamelCase`` and then referenced an undefined ``_import_structure``
# when constructing the _LazyModule; the canonical structure is restored here.
_import_structure = {
    'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}

# Vision extras (feature extractor / image processor) require the vision backend.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_mobilevit'] = ['MobileViTFeatureExtractor']
    _import_structure['image_processing_mobilevit'] = ['MobileViTImageProcessor']

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mobilevit'] = [
        'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MobileViTForImageClassification',
        'MobileViTForSemanticSegmentation',
        'MobileViTModel',
        'MobileViTPreTrainedModel',
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_mobilevit'] = [
        'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFMobileViTForImageClassification',
        'TFMobileViTForSemanticSegmentation',
        'TFMobileViTModel',
        'TFMobileViTPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so backends load only on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 121
|
import os
from math import logaa
def _lowerCAmelCase ( __magic_name__ :str = "base_exp.txt" ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(__magic_name__ ) , __magic_name__ ) ) ):
UpperCAmelCase_, UpperCAmelCase_ = list(map(__magic_name__ , line.split(''',''' ) ) )
if x * logaa(__magic_name__ ) > largest:
UpperCAmelCase_ = x * logaa(__magic_name__ )
UpperCAmelCase_ = i + 1
return result
if __name__ == "__main__":
print(solution())
| 121
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
# Disable autograd globally: this checkpoint-conversion script only needs inference.
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    """Build (old_key, new_key) pairs mapping original ViT-MSN checkpoint
    parameter names to HuggingFace ViT parameter names.

    Args:
        config: a ViTMSNConfig-like object providing ``num_hidden_layers``.
        base_model: if True, target a bare ViT (no "vit." prefix and no
            classification head).

    Returns:
        List of (source_key, destination_key) tuples.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight"""))
        rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias"""))
        rename_keys.append(
            (f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight"""))
        rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias"""))
        rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight"""))
        rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias"""))
        rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight"""))
        rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias"""))
        rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight"""))
        rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias"""))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ])

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused qkv projection of the original checkpoint into separate
    query/key/value weights under HuggingFace names. Mutates state_dict in place.

    Args:
        state_dict: checkpoint dict with ``module.blocks.{i}.attn.qkv.*`` keys.
        config: provides ``num_hidden_layers`` and ``hidden_size``.
        base_model: if True, omit the "vit." key prefix.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the classification-head weights from the checkpoint (in place).

    Missing keys are ignored so the function is safe on headless checkpoints.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    """Drop the MSN projection-head (module.fc.*) weights from the checkpoint
    (in place); they are not part of the converted ViT model.

    Missing keys are ignored.
    """
    # projection head is used in the self-supervised pre-training in MSN,
    # but is not part of the exported encoder.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move dct[old] to dct[new] (in place). Raises KeyError if old is absent."""
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download an original ViT-MSN checkpoint, convert it to the HuggingFace
    ViTMSNModel format, verify a slice of the forward output against reference
    values, and save model + image processor.

    Args:
        checkpoint_url: URL of the original MSN ``.pth.tar`` checkpoint.
        pytorch_dump_folder_path: directory where the converted artifacts go.
    """
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # Architecture hyper-parameters are encoded in the checkpoint file name.
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Defect fix: the parser and parsed args were never bound to names,
    # leaving `parser`/`args` undefined at runtime.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
        type=str,
        help='''URL of the checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 314
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: submodule name -> public symbols it provides.
_import_structure = {
    '''configuration_nllb_moe''': [
        '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''NllbMoeConfig''',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''NllbMoeForConditionalGeneration''',
        '''NllbMoeModel''',
        '''NllbMoePreTrainedModel''',
        '''NllbMoeTop2Router''',
        '''NllbMoeSparseMLP''',
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Defect fix: import name must match the exported symbol
        # "NllbMoeTop2Router" (was mangled to "NllbMoeTopaRouter").
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    # Install the lazy module under this package's name so attribute access
    # triggers on-demand imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 314
| 1
|
"""simple docstring"""
def greatest_common_divisor(x: int, y: int) -> int:
    """Return gcd(x, y) via the Euclidean algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Return the least common multiple of x and y."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Project Euler 5: smallest positive number evenly divisible by 1..n."""
    g = 1
    for i in range(1, n + 1):
        # Fold each i into the running lcm; the result divides by all of 1..i.
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 482
|
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Project Euler 97: last ``n`` digits of the non-Mersenne prime
    28433 * 2**7830457 + 1.

    Args:
        n: number of trailing digits to return (non-negative int).

    Raises:
        ValueError: if n is not a non-negative integer.
    """
    # Defect fix: validate the type against int (was isinstance(n, n)).
    if not isinstance(n, int) or n < 0:
        raise ValueError('''Invalid input''')
    modulus = 10**n
    # Three-argument pow keeps the exponentiation cheap by reducing mod 10**n.
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(F'''{solution(10) = }''')
| 482
| 1
|
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of nums[left..right] (inclusive) by divide and conquer.

    Args:
        nums: non-empty list of numbers.
        left: left bound (may be negative, Python-style).
        right: right bound (may be negative, Python-style).

    Raises:
        ValueError: if nums is empty.
        IndexError: if a bound is out of range.
    """
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 715
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}

# Defect fix: these five constants were all bound to a single throwaway name;
# the tokenizer class below references them by their canonical names.
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
        """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
        """xlm-roberta-large-finetuned-conll02-dutch""": (
            """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
        ),
        """xlm-roberta-large-finetuned-conll02-spanish""": (
            """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
        ),
        """xlm-roberta-large-finetuned-conll03-english""": (
            """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
        ),
        """xlm-roberta-large-finetuned-conll03-german""": (
            """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """xlm-roberta-base""": 512,
    """xlm-roberta-large""": 512,
    """xlm-roberta-large-finetuned-conll02-dutch""": 512,
    """xlm-roberta-large-finetuned-conll02-spanish""": 512,
    """xlm-roberta-large-finetuned-conll03-english""": 512,
    """xlm-roberta-large-finetuned-conll03-german""": 512,
}
class lowercase(PreTrainedTokenizer):
    """XLM-RoBERTa tokenizer based on a SentencePiece BPE model, mimicking the
    original fairseq vocabulary alignment (first ids reserved for special
    tokens, remaining spm pieces shifted by one).
    """

    # Class attributes the PreTrainedTokenizer framework reads by name.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # The C++ spm processor is not picklable: ship its serialized proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """<s> A </s> (pair: <s> A </s></s> B </s>)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 marks a special token, 0 a sequence token."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """XLM-RoBERTa does not use token types: all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (int) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Joins sub-tokens and turns SentencePiece's '▁' markers back into spaces."""
        out_string = ''.join(tokens).replace('▁', ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No original file on disk: dump the serialized spm model instead.
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 648
| 0
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Lazy-import structure: submodule name -> public symbols it provides.
_import_structure = {
    'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
    'tokenization_tapas': ['TapasTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TapasForMaskedLM',
        'TapasForQuestionAnswering',
        'TapasForSequenceClassification',
        'TapasModel',
        'TapasPreTrainedModel',
        'load_tf_weights_in_tapas',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFTapasForMaskedLM',
        'TFTapasForQuestionAnswering',
        'TFTapasForSequenceClassification',
        'TFTapasModel',
        'TFTapasPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module under this package's name so attribute access
    # triggers on-demand imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 86
|
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
# Path to a small SentencePiece BPE model checked into the test fixtures; used
# as the multilingual vocab file for BartphoTokenizer in the tests below.
__lowercase : str = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class _A(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BartphoTokenizer, driven by the shared
    TokenizerTesterMixin suite plus one explicit tokenization check.
    """

    # Attributes the TokenizerTesterMixin protocol reads by name.
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # Write a tiny monolingual vocab file next to the fixture spm model.
        vocab = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''monolingual_vocab_file'''])
        with open(self.monolingual_vocab_file, '''w''', encoding='''utf-8''') as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(
            get_tests_dir("fixtures/test_sentencepiece_bpe.model"),
            self.monolingual_vocab_file,
            **self.special_tokens_map,
        )
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # "là" is outside the tiny vocab, so it round-trips to <unk> pieces.
        input_text = '''This is a là test'''
        output_text = '''This is a<unk><unk> test'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(
            get_tests_dir("fixtures/test_sentencepiece_bpe.model"),
            self.monolingual_vocab_file,
            **self.special_tokens_map,
        )
        text = '''This is a là test'''
        bpe_tokens = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 564
| 0
|
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
# Defect fix: the test class below calls ``logger.info``; the logger binding
# had been lost to a throwaway name.
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class A_(unittest.TestCase):
    """Runs the doctest examples embedded in transformers source and doc files.

    NOTE(review): currently skipped at class level ("Temporarily disable the
    doc tests."); test method names reconstructed — confirm against upstream.
    """

    def analyze_directory(self, directory, identifier=None, n_identifier=None, ignore_files=None, only_modules=True):
        """Doctest every file in ``directory`` matching the given filters.

        Args:
            directory: Path to scan (non-recursively).
            identifier: only keep files whose name contains this substring.
            n_identifier: substring (or list of substrings) to exclude.
            ignore_files: file names to skip outright (``__init__.py`` always is).
            only_modules: if True, run DocTestSuite on the transformers
                attribute named after the file; otherwise doctest the file text.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing', file)

            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f'{module_identifier} is not a module.')
            else:
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'tokenization'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'configuration'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 509
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class A_(unittest.TestCase):
    """Slow, device-sharded integration tests for the Flax Stable Diffusion 2
    pipeline, checking a fixed output slice against recorded reference values.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2', revision='bf16', dtype=jnp.bfloat16, )

        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        # Replicate params and shard inputs across all local devices.
        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1E-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = 'stabilityai/stable-diffusion-2'
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder='scheduler')
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision='bf16', dtype=jnp.bfloat16, )
        params['scheduler'] = scheduler_params

        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1E-2
| 509
| 1
|
def solution(limit: int = 50000000) -> int:
    '''Project Euler 87: count numbers below ``limit`` expressible as
    p**2 + q**3 + r**4 with p, q, r prime.

    Args:
        limit: exclusive upper bound.

    Returns:
        The count of distinct expressible numbers below the limit.
    '''
    ret = set()
    # Largest prime whose square can contribute: the smallest cube+fourth-power
    # contribution is 2**3 + 2**4 = 24, hence limit - 24 bounds the square.
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Simple odd-only sieve up to prime_square_limit.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            # 16 = 2**4 is the smallest fourth-power term.
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 70
|
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
# Defect fix: all three docstring constants were bound to the same throwaway
# name; the Bleu metric class references them as _CITATION / _DESCRIPTION /
# _KWARGS_DESCRIPTION.
_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
    author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
    title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
    booktitle = {},
    year = {2002},
    pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
    title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
    author = \"Lin, Chin-Yew  and
      Och, Franz Josef\",
    booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
    month = \"aug 23{--}aug 27\",
    year = \"2004\",
    address = \"Geneva, Switzerland\",
    publisher = \"COLING\",
    url = \"https://www.aclweb.org/anthology/C04-1072\",
    pages = \"501--507\",
}
"""

_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.

Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].

BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""

_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
    predictions: list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    'bleu': bleu score,
    'precisions': geometric mean of n-gram precisions,
    'brevity_penalty': brevity penalty,
    'length_ratio': ratio of lengths,
    'translation_length': translation_length,
    'reference_length': reference_length
Examples:

    >>> predictions = [
    ...     [\"hello\", \"there\", \"general\", \"kenobi\"],                             # tokenized prediction of the first sample
    ...     [\"foo\", \"bar\", \"foobar\"]                                             # tokenized prediction of the second sample
    ... ]
    >>> references = [
    ...     [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]],  # tokenized references for the first sample (2 references)
    ...     [[\"foo\", \"bar\", \"foobar\"]]                                           # tokenized references for the second sample (1 reference)
    ... ]
    >>> bleu = datasets.load_metric(\"bleu\")
    >>> results = bleu.compute(predictions=predictions, references=references)
    >>> print(results[\"bleu\"])
    1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """Corpus-level BLEU metric wrapping the TensorFlow NMT reference
    implementation (nmt_bleu.compute_bleu).
    """

    def _info(self):
        # `datasets.Metric` discovers metadata through the `_info` hook name.
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string', id='token'), id='sequence'),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string', id='token'), id='sequence'), id='references'),
                }), codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'], reference_urls=[
                'https://en.wikipedia.org/wiki/BLEU',
                'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
            ], )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        # `Metric.compute()` dispatches here; the `_compute` name is required.
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 687
| 0
|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __UpperCamelCase ( _lowerCAmelCase ):
    """Preprocess a PIL image into a [-1, 1] float tensor of shape (1, C, H, W).

    The image is resized down to the nearest multiple of 32 on each side
    (required by the UNet), normalized from [0, 255] to [-1, 1], and
    converted to NCHW layout.

    Fixes: the obfuscated source bound every intermediate to throwaway
    locals while reading the unbound names `w`, `h` and `image`, and used
    the nonexistent dtype `np.floataa` (restored to `np.float32`, matching
    the `/ 255.0` normalization).
    """
    w, h = _lowerCAmelCase.size
    # resize to integer multiple of 32
    w, h = (x - x % 32 for x in (w, h))
    image = _lowerCAmelCase.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    # HWC -> NCHW with a leading batch dimension
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class __magic_name__ ( _a):
    """Super-resolution diffusion pipeline: VQ-VAE decoder + UNet + scheduler.

    NOTE(review): obfuscation damage — in `__call__` the original local names
    were all replaced by the single target `UpperCAmelCase`, so the names
    read later (`batch_size`, `image`, `height`, `width`, `latents`,
    `accepts_eta`, `eta`, plus the intermediates fed to the scheduler) are
    unbound as written and the method raises NameError; confirm against the
    upstream pipeline source before relying on behavior notes here.
    """

    def __init__( self : List[str] ,__SCREAMING_SNAKE_CASE : VQModel ,__SCREAMING_SNAKE_CASE : UNetaDModel ,__SCREAMING_SNAKE_CASE : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] ,):
        # NOTE(review): the three parameters share one obfuscated name
        # (`__SCREAMING_SNAKE_CASE`), which is a SyntaxError as written.
        super().__init__()
        # Register submodules so they are saved/loaded with the pipeline.
        self.register_modules(vqvae=__SCREAMING_SNAKE_CASE ,unet=__SCREAMING_SNAKE_CASE ,scheduler=__SCREAMING_SNAKE_CASE )

    @torch.no_grad()
    def __call__( self : Optional[Any] ,__SCREAMING_SNAKE_CASE : Union[torch.Tensor, PIL.Image.Image] = None ,__SCREAMING_SNAKE_CASE : Optional[int] = 1 ,__SCREAMING_SNAKE_CASE : Optional[int] = 1_0_0 ,__SCREAMING_SNAKE_CASE : Optional[float] = 0.0 ,__SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__SCREAMING_SNAKE_CASE : Optional[str] = "pil" ,__SCREAMING_SNAKE_CASE : bool = True ,):
        # Accept a single PIL image (batch of 1) or a batched tensor.
        if isinstance(__SCREAMING_SNAKE_CASE ,PIL.Image.Image ):
            UpperCAmelCase = 1
        elif isinstance(__SCREAMING_SNAKE_CASE ,torch.Tensor ):
            UpperCAmelCase = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__SCREAMING_SNAKE_CASE )}''' )
        if isinstance(__SCREAMING_SNAKE_CASE ,PIL.Image.Image ):
            UpperCAmelCase = preprocess(__SCREAMING_SNAKE_CASE )
        UpperCAmelCase , UpperCAmelCase = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
        UpperCAmelCase = next(self.unet.parameters() ).dtype
        # Sample the initial latent noise on the pipeline's device.
        UpperCAmelCase = randn_tensor(__SCREAMING_SNAKE_CASE ,generator=__SCREAMING_SNAKE_CASE ,device=self.device ,dtype=__SCREAMING_SNAKE_CASE )
        UpperCAmelCase = image.to(device=self.device ,dtype=__SCREAMING_SNAKE_CASE )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ,device=self.device )
        UpperCAmelCase = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        UpperCAmelCase = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        UpperCAmelCase = {}
        if accepts_eta:
            UpperCAmelCase = eta
        for t in self.progress_bar(__SCREAMING_SNAKE_CASE ):
            # concat latents and low resolution image in the channel dimension.
            UpperCAmelCase = torch.cat([latents, image] ,dim=1 )
            UpperCAmelCase = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
            # predict the noise residual
            UpperCAmelCase = self.unet(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ).sample
            # compute the previous noisy sample x_t -> x_t-1
            UpperCAmelCase = self.scheduler.step(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ).prev_sample
        # decode the image latents with the VQVAE
        UpperCAmelCase = self.vqvae.decode(__SCREAMING_SNAKE_CASE ).sample
        # Map decoded output from [-1, 1] back to [0, 1] and NHWC numpy.
        UpperCAmelCase = torch.clamp(__SCREAMING_SNAKE_CASE ,-1.0 ,1.0 )
        UpperCAmelCase = image / 2 + 0.5
        UpperCAmelCase = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
        if output_type == "pil":
            UpperCAmelCase = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
| 405
|
from collections import Counter
from timeit import timeit
def __UpperCamelCase ( _lowerCAmelCase = "" , ):
    """Return True if the characters of `_lowerCAmelCase` can be rearranged
    into a palindrome.

    Spaces are ignored and case is folded; a rearrangement exists iff at
    most one character occurs an odd number of times.

    Fix: the obfuscated source read the unbound name `input_str` instead of
    the parameter.
    """
    return sum(c % 2 for c in Counter(_lowerCAmelCase.replace(" " , "" ).lower() ).values() ) < 2
def __UpperCamelCase ( _lowerCAmelCase = "" ):
    """Return True if `_lowerCAmelCase` can be rearranged into a palindrome.

    Counts character frequencies by hand (spaces ignored, case folded); a
    rearrangement exists iff at most one character occurs an odd number of
    times. An empty string trivially qualifies.

    Fix: the obfuscated source bound every intermediate to throwaway locals
    and then read the unbound names `input_str`, `lower_case_input_str`,
    `character_freq_dict` and `odd_char`.
    """
    if len(_lowerCAmelCase ) == 0:
        return True
    lower_case_input_str = _lowerCAmelCase.replace(" " , "" ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
def __UpperCamelCase ( _lowerCAmelCase = "" ):
    """Print and time both palindrome-rearrangement checks on `_lowerCAmelCase`.

    NOTE(review): the callables `can_string_be_rearranged_as_palindrome_counter`
    and `can_string_be_rearranged_as_palindrome`, and the `z.check_str`
    referenced by the `timeit` setup, are not defined under those names in
    this file — the surrounding functions were renamed by obfuscation;
    confirm the intended targets before running.
    """
    print("\nFor string = " , _lowerCAmelCase , ":" )
    print(
        "> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(_lowerCAmelCase ) , "\ttime =" , timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
    print(
        "> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(_lowerCAmelCase ) , "\ttime =" , timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
    # Read the candidate string from stdin, benchmark both checks, then
    # report whether it can be rearranged into a palindrome.
    # NOTE(review): `benchmark`, `check_str` and `status` are unbound here —
    # the corresponding assignments/definitions were renamed to
    # `__lowerCAmelCase` by obfuscation, so this guard raises NameError as
    # written.
    __lowerCAmelCase =input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    __lowerCAmelCase =can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 405
| 1
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCamelCase_ (__A ):
    """OWL-ViT processor: wraps a CLIP tokenizer and an OWL-ViT image processor
    behind a single `__call__` handling text queries, query images and images.

    NOTE(review): obfuscation damage in this class —
    * the three class attributes below all share the name `__magic_name__`,
      so only the last assignment survives;
    * several `def` signatures repeat one parameter name, which is a
      SyntaxError as written;
    * many locals are assigned to `UpperCAmelCase_` but read back under their
      original names (`feature_extractor`, `encodings`, `max_num_queries`,
      `input_ids`, `attention_mask`, `encoding`, `query_pixel_values`,
      `image_features`), which are unbound;
    * the five `_SCREAMING_SNAKE_CASE` methods shadow one another.
    Confirm against the upstream `OwlViTProcessor` before use.
    """

    __magic_name__ = ['''image_processor''', '''tokenizer''']
    __magic_name__ = '''OwlViTImageProcessor'''
    __magic_name__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')

    def __init__( self : List[Any] , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : Union[str, Any] ) -> str:
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`; both components are mandatory.
        UpperCAmelCase_ : Optional[Any] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , lowerCAmelCase_ , )
            UpperCAmelCase_ : int = kwargs.pop("feature_extractor" )
        UpperCAmelCase_ : str = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )

    def __call__( self : Optional[int] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[int]="max_length" , lowerCAmelCase_ : Tuple="np" , **lowerCAmelCase_ : List[Any] ) -> List[str]:
        # Tokenize text queries (padding each sample with " " up to the
        # batch's max query count), process query images and/or target
        # images, and bundle the results into a BatchEncoding.
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )
        if text is not None:
            if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or (isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and not isinstance(text[0] , lowerCAmelCase_ )):
                UpperCAmelCase_ : int = [self.tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )]
            elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(text[0] , lowerCAmelCase_ ):
                UpperCAmelCase_ : str = []
                # Maximum number of queries across batch
                UpperCAmelCase_ : Union[str, Any] = max([len(lowerCAmelCase_ ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(lowerCAmelCase_ ) != max_num_queries:
                        UpperCAmelCase_ : int = t + [" "] * (max_num_queries - len(lowerCAmelCase_ ))
                    UpperCAmelCase_ : List[str] = self.tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
                    encodings.append(lowerCAmelCase_ )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
            # Concatenate the per-sample encodings into one batch in the
            # requested tensor framework.
            if return_tensors == "np":
                UpperCAmelCase_ : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                UpperCAmelCase_ : Dict = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                UpperCAmelCase_ : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                UpperCAmelCase_ : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch

                UpperCAmelCase_ : str = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                UpperCAmelCase_ : Any = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                UpperCAmelCase_ : Optional[Any] = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                UpperCAmelCase_ : Any = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("Target return tensor type could not be returned" )
            UpperCAmelCase_ : Union[str, Any] = BatchEncoding()
            UpperCAmelCase_ : List[str] = input_ids
            UpperCAmelCase_ : Any = attention_mask
        if query_images is not None:
            UpperCAmelCase_ : Optional[int] = BatchEncoding()
            UpperCAmelCase_ : Any = self.image_processor(
                lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ).pixel_values
            UpperCAmelCase_ : str = query_pixel_values
        if images is not None:
            UpperCAmelCase_ : Tuple = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
        if text is not None and images is not None:
            UpperCAmelCase_ : List[str] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            UpperCAmelCase_ : Optional[Any] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowerCAmelCase_ ) , tensor_type=lowerCAmelCase_ )

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , *lowerCAmelCase_ : Any , **lowerCAmelCase_ : int ) -> Optional[Any]:
        # Delegate box post-processing to the image processor.
        return self.image_processor.post_process(*lowerCAmelCase_ , **lowerCAmelCase_ )

    def _SCREAMING_SNAKE_CASE ( self : int , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Optional[int] ) -> Dict:
        # Delegate object-detection post-processing to the image processor.
        return self.image_processor.post_process_object_detection(*lowerCAmelCase_ , **lowerCAmelCase_ )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Dict ) -> Tuple:
        # Delegate image-guided detection post-processing to the image processor.
        return self.image_processor.post_process_image_guided_detection(*lowerCAmelCase_ , **lowerCAmelCase_ )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , *lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Tuple ) -> List[str]:
        # Delegate batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Optional[Any] ) -> int:
        # Delegate decoding to the tokenizer.
        return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )

    @property
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
        # Deprecated alias for `image_processor_class`.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCAmelCase_ , )
        return self.image_processor_class

    @property
    def _SCREAMING_SNAKE_CASE ( self : str ) -> Any:
        # Deprecated alias for `image_processor`.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCAmelCase_ , )
        return self.image_processor
| 95
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCAmelCase ( model , ckpt_dir , model_name ):
    """Export a PyTorch `BertModel`'s weights as a TensorFlow 1.x checkpoint.

    Args:
        model: the PyTorch `BertModel` whose `state_dict` is exported.
        ckpt_dir: directory where the `.ckpt` files are written (created if
            missing).
        model_name: used for the checkpoint filename (dashes become
            underscores).

    Fixes: the obfuscated source declared three parameters with the same
    name `a_` (a SyntaxError); they are restored to `model` / `ckpt_dir` /
    `model_name` to match the keyword arguments used by the caller below.
    The local names (`var_map`, `state_dict`, `tf_var`, ...) that the body
    reads are likewise re-bound.
    """
    # Weights that are stored transposed on the TensorFlow side.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # (PyTorch substring, TF substring) rename rules, applied in order.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )

    state_dict = model.state_dict()

    def to_tf_var_name(name ):
        # Map a PyTorch state-dict key to its TF variable name.
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return F'''bert/{name}'''

    def create_tf_var(tensor , name , session ):
        # Create and zero-initialize a TF variable shaped like `tensor`.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            # Sanity-check that the value round-trips through TF.
            print(F'''Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}''' )

        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )
def UpperCAmelCase ( a_=None ):
    """CLI entry point: parse arguments, load the PyTorch BERT model and
    convert it to a TensorFlow checkpoint.

    Args:
        a_: optional argv list forwarded to argparse (None -> sys.argv).

    Fixes: the obfuscated source passed the argv parameter `a_` (i.e. None)
    as every `type=` / `required=` value, making all options optional and
    untyped, and read the unbound names `args` / `model`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(a_ )

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    # NOTE(review): `convert_pytorch_checkpoint_to_tf` is not defined under
    # that name in this file — the converter above was renamed to
    # `UpperCAmelCase` by obfuscation; confirm the intended target.
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this file — the entry-point
    # function above was renamed to `UpperCAmelCase` by obfuscation, so this
    # guard raises NameError as written.
    main()
| 55
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class _lowerCAmelCase ( _lowercase ):
    # Output container for the 1-D UNet below (a `BaseOutput` subclass).
    # NOTE(review): `A__ = 42` is the obfuscated remnant of the original
    # annotated output field (a tensor-typed `sample` attribute) — confirm
    # against the upstream model output class.
    A__ = 42
class _lowerCAmelCase ( _lowercase , _lowercase ):
    """1-D UNet: down blocks -> mid block -> up blocks, with Fourier or
    positional timestep embeddings and an optional output block.

    NOTE(review): obfuscation damage — every parameter of `__init__` and of
    the forward method below is named `__UpperCAmelCase` (duplicate argument
    names are a SyntaxError), and many locals are assigned to
    `lowerCAmelCase__` but read back under their original names
    (`timesteps`, `timestep_embed`, `output_channel`, `input_channel`,
    `is_final_block`, `down_block`, `reversed_block_out_channels`,
    `final_upsample_channels`, `up_block`, `res_samples`, `sample`).
    Confirm against the upstream 1-D UNet model before relying on it.
    """

    @register_to_config
    def __init__( self , __UpperCAmelCase = 6_5536 , __UpperCAmelCase = None , __UpperCAmelCase = 2 , __UpperCAmelCase = 2 , __UpperCAmelCase = 0 , __UpperCAmelCase = "fourier" , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = 0.0 , __UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , __UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , __UpperCAmelCase = "UNetMidBlock1D" , __UpperCAmelCase = None , __UpperCAmelCase = (32, 32, 64) , __UpperCAmelCase = None , __UpperCAmelCase = 8 , __UpperCAmelCase = 1 , __UpperCAmelCase = False , ):
        super().__init__()
        lowerCAmelCase__ : Dict = sample_size

        # time
        # Choose the timestep projection: Gaussian Fourier features or the
        # standard sinusoidal positional embedding.
        if time_embedding_type == "fourier":
            lowerCAmelCase__ : str = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=__UpperCAmelCase , log=__UpperCAmelCase , flip_sin_to_cos=__UpperCAmelCase )
            lowerCAmelCase__ : int = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            lowerCAmelCase__ : int = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=__UpperCAmelCase , downscale_freq_shift=__UpperCAmelCase )
            lowerCAmelCase__ : str = block_out_channels[0]

        if use_timestep_embedding:
            lowerCAmelCase__ : str = block_out_channels[0] * 4
            lowerCAmelCase__ : Dict = TimestepEmbedding(
                in_channels=__UpperCAmelCase , time_embed_dim=__UpperCAmelCase , act_fn=__UpperCAmelCase , out_dim=block_out_channels[0] , )

        lowerCAmelCase__ : str = nn.ModuleList([] )
        lowerCAmelCase__ : Optional[Any] = None
        lowerCAmelCase__ : Optional[int] = nn.ModuleList([] )
        lowerCAmelCase__ : Optional[Any] = None

        # down
        lowerCAmelCase__ : List[Any] = in_channels
        for i, down_block_type in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ : List[str] = output_channel
            lowerCAmelCase__ : Any = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            lowerCAmelCase__ : Any = i == len(__UpperCAmelCase ) - 1
            lowerCAmelCase__ : Any = get_down_block(
                __UpperCAmelCase , num_layers=__UpperCAmelCase , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(__UpperCAmelCase )

        # mid
        lowerCAmelCase__ : Optional[int] = get_mid_block(
            __UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=__UpperCAmelCase , add_downsample=__UpperCAmelCase , )

        # up
        lowerCAmelCase__ : Union[str, Any] = list(reversed(__UpperCAmelCase ) )
        lowerCAmelCase__ : List[str] = reversed_block_out_channels[0]
        if out_block_type is None:
            lowerCAmelCase__ : Optional[int] = out_channels
        else:
            lowerCAmelCase__ : int = block_out_channels[0]
        for i, up_block_type in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ : Optional[int] = output_channel
            lowerCAmelCase__ : Any = (
                reversed_block_out_channels[i + 1] if i < len(__UpperCAmelCase ) - 1 else final_upsample_channels
            )
            lowerCAmelCase__ : Optional[int] = i == len(__UpperCAmelCase ) - 1
            lowerCAmelCase__ : Optional[int] = get_up_block(
                __UpperCAmelCase , num_layers=__UpperCAmelCase , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(__UpperCAmelCase )
            lowerCAmelCase__ : int = output_channel

        # out
        lowerCAmelCase__ : Union[str, Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        lowerCAmelCase__ : Dict = get_out_block(
            out_block_type=__UpperCAmelCase , num_groups_out=__UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=__UpperCAmelCase , act_fn=__UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )

    def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True , ):
        # Forward pass: embed the timestep, run down/mid/up blocks with skip
        # connections, then the optional output block.
        lowerCAmelCase__ : int = timestep
        # Normalize the timestep into a 1-D tensor on the sample's device.
        if not torch.is_tensor(__UpperCAmelCase ):
            lowerCAmelCase__ : Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(__UpperCAmelCase ) and len(timesteps.shape ) == 0:
            lowerCAmelCase__ : Any = timesteps[None].to(sample.device )

        lowerCAmelCase__ : int = self.time_proj(__UpperCAmelCase )
        if self.config.use_timestep_embedding:
            lowerCAmelCase__ : str = self.time_mlp(__UpperCAmelCase )
        else:
            # Broadcast the raw projection across the sample's length dim.
            lowerCAmelCase__ : Tuple = timestep_embed[..., None]
            lowerCAmelCase__ : Union[str, Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            lowerCAmelCase__ : Any = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )

        # 2. down
        lowerCAmelCase__ : int = ()
        for downsample_block in self.down_blocks:
            lowerCAmelCase__ , lowerCAmelCase__ : int = downsample_block(hidden_states=__UpperCAmelCase , temb=__UpperCAmelCase )
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            lowerCAmelCase__ : Dict = self.mid_block(__UpperCAmelCase , __UpperCAmelCase )

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks ):
            lowerCAmelCase__ : Tuple = down_block_res_samples[-1:]
            lowerCAmelCase__ : int = down_block_res_samples[:-1]
            lowerCAmelCase__ : Optional[int] = upsample_block(__UpperCAmelCase , res_hidden_states_tuple=__UpperCAmelCase , temb=__UpperCAmelCase )

        # 5. post-process
        if self.out_block:
            lowerCAmelCase__ : Any = self.out_block(__UpperCAmelCase , __UpperCAmelCase )

        if not return_dict:
            return (sample,)

        return UNetaDOutput(sample=__UpperCAmelCase )
| 470
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __lowerCAmelCase ( UpperCamelCase ) -> None:
    """Freeze `UpperCamelCase`: disable gradient tracking on all of its
    parameters.

    Fixes: the obfuscated source iterated the unbound name `module` and
    assigned `False` to a throwaway local instead of `param.requires_grad`,
    leaving the module trainable.
    """
    for param in UpperCamelCase.parameters():
        param.requires_grad = False
def __lowerCAmelCase ( ) -> str:
    """Pick the best available torch device: "cuda", then "mps", else "cpu".

    Prints a warning when MPS is selected, since backpropagation is known to
    misbehave there.

    Fix: the obfuscated source bound the choice to throwaway locals while
    reading the unbound name `device`; the local is now named `device`.
    """
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def __lowerCAmelCase ( UpperCamelCase ) -> None:
    """Display the image `UpperCamelCase` with matplotlib, hiding both axes.

    Fixes: the obfuscated source bound the `AxesImage` returned by
    `plt.imshow` to a throwaway local while reading the unbound name `fig`,
    and passed the image itself as the axis-visibility flag instead of
    `False`.
    """
    fig = plt.imshow(UpperCamelCase )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def __lowerCAmelCase ( ) -> str:
    """Return the current wall-clock time formatted as ``HH:MM:SS``.

    Fix: the obfuscated source bound both intermediates to throwaway locals
    and returned the unbound name `timestamp`.
    """
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''' )
    return timestamp
| 470
| 1
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a__ ( _lowercase ):
    """Chinese-CLIP processor: a BERT tokenizer plus a Chinese-CLIP image
    processor behind one `__call__`.

    NOTE(review): obfuscation damage — the three class attributes below all
    share the name `__magic_name__` (only the last assignment survives); in
    `__init__` the value popped from kwargs is bound to
    `SCREAMING_SNAKE_CASE`, so the `feature_extractor` read later is
    unbound; in `__call__` the tokenizer / image-processor results never
    reach the `encoding` / `image_features` names that are returned; the
    `model_input_names` property reads unbound `tokenizer_input_names` /
    `image_processor_input_names`. Confirm against the upstream
    `ChineseCLIPProcessor` before use.
    """

    __magic_name__ : List[Any] = ["image_processor", "tokenizer"]
    __magic_name__ : Optional[Any] = "ChineseCLIPImageProcessor"
    __magic_name__ : Any = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self : List[Any], __UpperCAmelCase : List[Any]=None, __UpperCAmelCase : Tuple=None, **__UpperCAmelCase : Optional[int] ) -> Dict:
        """Accept the deprecated `feature_extractor` kwarg as an alias for
        `image_processor`; both components are mandatory."""
        SCREAMING_SNAKE_CASE : str = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', __UpperCAmelCase, )
            SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop('''feature_extractor''' )
        SCREAMING_SNAKE_CASE : Dict = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(__UpperCAmelCase, __UpperCAmelCase )
        SCREAMING_SNAKE_CASE : Dict = self.image_processor

    def __call__(self : Optional[Any], __UpperCAmelCase : Dict=None, __UpperCAmelCase : Optional[int]=None, __UpperCAmelCase : Union[str, Any]=None, **__UpperCAmelCase : Optional[Any] ) -> Any:
        """Tokenize `text` and/or process `images`; at least one must be
        given. When both are given, pixel values are attached to the text
        encoding."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            SCREAMING_SNAKE_CASE : int = self.tokenizer(__UpperCAmelCase, return_tensors=__UpperCAmelCase, **__UpperCAmelCase )
        if images is not None:
            SCREAMING_SNAKE_CASE : Any = self.image_processor(__UpperCAmelCase, return_tensors=__UpperCAmelCase, **__UpperCAmelCase )
        if text is not None and images is not None:
            SCREAMING_SNAKE_CASE : Tuple = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__UpperCAmelCase ), tensor_type=__UpperCAmelCase )

    def lowercase__ (self : Optional[int], *__UpperCAmelCase : Optional[int], **__UpperCAmelCase : Optional[Any] ) -> str:
        """Delegate batch decoding to the tokenizer."""
        return self.tokenizer.batch_decode(*__UpperCAmelCase, **__UpperCAmelCase )

    def lowercase__ (self : Optional[Any], *__UpperCAmelCase : Any, **__UpperCAmelCase : Any ) -> Dict:
        """Delegate decoding to the tokenizer."""
        return self.tokenizer.decode(*__UpperCAmelCase, **__UpperCAmelCase )

    @property
    def lowercase__ (self : Any ) -> Union[str, Any]:
        """Merged, de-duplicated model input names from tokenizer and image
        processor."""
        SCREAMING_SNAKE_CASE : Any = self.tokenizer.model_input_names
        SCREAMING_SNAKE_CASE : List[Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def lowercase__ (self : Dict ) -> Optional[int]:
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', __UpperCAmelCase, )
        return self.image_processor_class
| 507
|
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

# Gate the real pipeline imports on optional heavy dependencies: when torch
# or transformers (>= 4.25.0) are missing, import dummy placeholder objects
# instead, which raise a helpful error only when actually used.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # Dependencies satisfied: expose the real model and pipelines.
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 507
| 1
|
import requests
from bsa import BeautifulSoup
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ = "AAPL" ):
    """Scrape the current stock price of ticker `UpperCamelCase__` from
    Yahoo Finance India.

    Fixes: the obfuscated source bound the URL and the CSS class to
    throwaway locals, then fetched the bare ticker symbol instead of the
    URL and filtered on the unbound name `class_`.
    """
    url = f'''https://in.finance.yahoo.com/quote/{UpperCamelCase__}?s={UpperCamelCase__}'''
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    # CSS class of the <div> wrapping the live price <span>.
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
    # Print live quotes for a handful of well-known tickers.
    # NOTE(review): `stock_price` is not defined in this file — the scraper
    # above was renamed to `SCREAMING_SNAKE_CASE_` by obfuscation, so this
    # guard raises NameError as written.
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 462
|
import doctest
from collections import deque
import numpy as np
class _lowerCamelCase :
    """Circular (cyclic) convolution of two discrete signals.

    The second signal is rotated against the first via a circulant matrix,
    then the matrix product yields the convolution.

    Fixes: the obfuscated source dropped the `self.` prefix on the signal
    attributes in `__init__` and bound every intermediate in the
    convolution method to a throwaway local, leaving `max_length`,
    `matrix`, `rotated_signal` and `final_signal` unbound.
    """

    def __init__( self ) -> None:
        # Example signals; callers may overwrite these attributes.
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def __SCREAMING_SNAKE_CASE ( self ) -> list[float]:
        """Return the circular convolution of `first_signal` and
        `second_signal`, rounded to two decimal places."""
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for _ in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # Row i holds the second signal rotated right by i positions.
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(value , 2 ) for value in final_signal]
if __name__ == "__main__":
    # Run any doctests defined in this module.
    doctest.testmod()
| 462
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCAmelCase = abspath(join(dirname(dirname(__file__)), '''src'''))
# Fix: the obfuscated source inserted the unbound name `git_repo_path`;
# the computed repo src path is bound to `UpperCAmelCase` above.
sys.path.insert(1, UpperCAmelCase)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    """Pytest hook: register diffusers' shared command-line options on the
    given option parser."""
    # Imported lazily so merely collecting this conftest does not require
    # diffusers' test utilities.
    from diffusers.utils.testing_utils import pytest_addoption_shared as _register_shared_options

    _register_shared_options(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    """Pytest hook: append diffusers' report generation to the terminal
    summary when `--make-reports` was passed.

    `__SCREAMING_SNAKE_CASE` is pytest's `terminalreporter` plugin object.

    Fixes: the obfuscated source bound the option value to a throwaway
    local, read the unbound names `terminalreporter` / `make_reports`, and
    passed the reporter object itself as the report `id`.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = __SCREAMING_SNAKE_CASE.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(__SCREAMING_SNAKE_CASE , id=make_reports )
| 84
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class UpperCAmelCase__ ( A_ ):
    '''Image processor that optionally rescales pixel values and pads each
    image symmetrically up to a multiple of `pad_size`.

    NOTE(review): obfuscation damage — `__init__` binds its settings to the
    throwaway local `_lowercase` instead of `self.*`, so the `self.do_rescale`
    etc. read in `preprocess` are unset; the `pad` and `preprocess`
    signatures repeat one parameter name (a SyntaxError as written); and the
    locals read in the bodies (`old_height`, `old_width`, `pad_height`,
    `pad_width`, `images`, `do_rescale`, ...) are unbound. Confirm against
    the upstream image processor before use.
    '''

    UpperCAmelCase_ = ['''pixel_values''']

    def __init__( self : List[Any] , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 2_55 , UpperCamelCase : bool = True , UpperCamelCase : int = 8 , **UpperCamelCase : Dict , ):
        """Store the default rescale/pad configuration."""
        super().__init__(**UpperCamelCase )
        _lowercase : str = do_rescale
        _lowercase : int = rescale_factor
        _lowercase : Optional[int] = do_pad
        _lowercase : Optional[int] = pad_size

    def lowerCAmelCase_ ( self : int , UpperCamelCase : np.ndarray , UpperCamelCase : float , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Optional[Any] ):
        """Rescale an image's pixel values by the given factor."""
        return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )

    def lowerCAmelCase_ ( self : List[str] , UpperCamelCase : np.ndarray , UpperCamelCase : int , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None ):
        """Symmetrically pad the bottom/right edges up to the next multiple
        of `size`."""
        _lowercase , _lowercase : Dict = get_image_size(UpperCamelCase )
        _lowercase : Optional[int] = (old_height // size + 1) * size - old_height
        _lowercase : str = (old_width // size + 1) * size - old_width
        return pad(UpperCamelCase , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=UpperCamelCase )

    def lowerCAmelCase_ ( self : List[Any] , UpperCamelCase : ImageInput , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[float] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : int , ):
        """Validate the inputs, then rescale, pad and reformat each image,
        returning a BatchFeature of pixel values."""
        _lowercase : int = do_rescale if do_rescale is not None else self.do_rescale
        _lowercase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        _lowercase : Optional[int] = do_pad if do_pad is not None else self.do_pad
        _lowercase : Any = pad_size if pad_size is not None else self.pad_size
        _lowercase : List[Any] = make_list_of_images(UpperCamelCase )
        if not valid_images(UpperCamelCase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        # All transformations expect numpy arrays.
        _lowercase : Optional[int] = [to_numpy_array(UpperCamelCase ) for image in images]
        if do_rescale:
            _lowercase : str = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images]
        if do_pad:
            _lowercase : int = [self.pad(UpperCamelCase , size=UpperCamelCase ) for image in images]
        _lowercase : str = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images]
        _lowercase : Optional[Any] = {'''pixel_values''': images}
        return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
| 322
| 0
|
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """Convert a 32-character bit string between endiannesses by reversing its
    four 8-character groups.

    Renamed from the scrambled ``_a`` so the callers in ``preprocess`` and
    ``get_block_words`` resolve.  Raises ``ValueError`` for wrong-length input.
    """
    if len(string_aa) != 32:
        raise ValueError('Input must be of length 32')
    little_endian = b''
    # Emit the four 8-bit groups in reverse order.
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Return the little-endian hex rendering (as bytes) of the low 32 bits of ``i``.

    Raises ``ValueError`` for negative input.
    """
    if i < 0:
        raise ValueError('Input must be non-negative')
    # Keep only the lowest 32 bits, rendered as exactly 8 hex digits.
    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    # Reverse byte order: two hex digits per byte.
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode('utf-8')
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """MD5 padding step: bits of ``message`` + '1' + zero padding + 64-bit
    little-endian original length.  The result's length is a multiple of 512.
    """
    bit_string = b''
    for char in message:
        # Iterating ``bytes`` yields ints; render each byte as 8 bit characters.
        bit_string += format(char, '08b').encode('utf-8')
    # The original length must be captured before padding is appended.
    start_len = format(len(bit_string), '064b').encode('utf-8')
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes):
    """Yield each 512-bit block of ``bit_string`` as a list of sixteen 32-bit ints.

    Raises ``ValueError`` when the length is not a multiple of 512.
    """
    if len(bit_string) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            # Each 32-char slice is little-endian; normalize before parsing as binary.
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    """Return the bitwise NOT of ``i`` restricted to 32 bits.

    Raises ``ValueError`` for negative input.
    """
    if i < 0:
        raise ValueError('Input must be non-negative')
    i_str = format(i, '032b')
    new_str = ''
    # Flip every bit of the 32-bit binary rendering.
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    """Add two numbers with 32-bit wraparound (modulo 2**32)."""
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    """Rotate the 32-bit value ``i`` left by ``shift`` bits.

    Raises ``ValueError`` when either argument is negative.
    """
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    # Bits shifted out on the left re-enter on the right via the XORed term.
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of ``message`` as 32 lowercase hex characters (bytes).

    Implements RFC 1321 on top of the helpers above (``preprocess``,
    ``get_block_words``, ``not_aa``, ``sum_aa``, ``left_rotate_aa``,
    ``reformat_hex``); the scrambled original referenced undefined names
    throughout the compression loop.
    """
    bit_string = preprocess(message)
    # Per-round additive constants: floor(2**32 * |sin(i + 1)|).
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    aa = 0x67_45_23_01
    ba = 0xEF_CD_AB_89
    ca = 0x98_BA_DC_FE
    da = 0x10_32_54_76
    # Per-round left-rotation amounts (four groups of sixteen).
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            # Rotate the four registers; ``b`` absorbs the mixed value.
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))
        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)
    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 716
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    """Holds DETR image-processor settings and computes expected output sizes.

    Renamed from the scrambled ``__lowercase`` so the reference
    ``DetrImageProcessingTester(self)`` in the test class's ``setUp`` resolves;
    the original also reused one parameter name for every argument (SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct the ``DetrImageProcessor`` under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should emit.

        Single image: shortest-edge resize arithmetic.  Batch: per-image values,
        then the batch-wide maxima (images get padded up to them).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ``DetrImageProcessor``: attributes, dict round-trip, the PIL /
    numpy / torch call paths, and (slow) COCO detection & panoptic annotations.

    Fixes vs. scrambled original: undefined mixin base, the processor class
    attribute was not named ``image_processing_class`` although the tests read
    it, and ``setUp`` assigned the tester to a throwaway local instead of
    ``self.image_processor_tester``.
    """

    # ``None`` when vision deps are missing; the mixin then skips its tests.
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes every expected configuration attribute."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'rescale_factor'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))

    def test_image_processor_from_dict_with_kwargs(self):
        """``from_dict`` honors defaults and size/pad kwarg overrides."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        # Covered by the mixin / call tests below.
        pass

    def test_call_pil(self):
        """PIL inputs: unbatched and batched outputs match the expected shapes."""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        """Numpy inputs: unbatched and batched outputs match the expected shapes."""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        """Torch inputs: unbatched and batched outputs match the expected shapes."""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        """Integration: COCO detection annotations yield the reference encoding."""
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 39769, 'annotations': target}
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        """Integration: COCO panoptic annotations (with masks) yield the reference encoding."""
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
| 585
| 0
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
# File name under which a scheduler's configuration is serialized; the scheduler
# mixin below reads this constant as ``SCHEDULER_CONFIG_NAME``.
SCHEDULER_CONFIG_NAME = 'scheduler_config.json'
class FlaxKarrasDiffusionSchedulers(Enum):
    """Enumeration of the Flax schedulers usable with the Karras-style pipelines.

    NOTE(review): names reconstructed following the diffusers convention; the
    scrambled original had an undefined base class and five identically-named
    members (invalid for an ``Enum``).
    """
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base output class for Flax scheduler step functions.

    NOTE(review): the scrambled original had an undefined base ``a`` and a
    ``= 42`` placeholder; the field is restored as the previous-timestep sample.
    """

    # Denoised sample for the previous timestep (x_{t-1}).
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    """Base mixin for Flax schedulers: config-driven construction, saving, and
    compatibility lookup.

    Fixes vs. scrambled original: class attributes and methods are named so the
    internal references (``SCHEDULER_CONFIG_NAME``, ``self._get_compatibles``,
    ``cls._compatibles``) resolve; the four methods previously shared one name.
    """

    # File name used by ``load_config``/``save_config`` (provided by ConfigMixin).
    config_name = SCHEDULER_CONFIG_NAME
    # Init kwargs that must not be serialized into the config.
    ignore_for_config = ["dtype"]
    # Names of scheduler classes this one is interchangeable with.
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """Instantiate a scheduler (and its mutable state) from a saved config."""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        # Flax schedulers keep their mutable quantities in a separate state pytree.
        # NOTE(review): ``state`` stays unbound when the scheduler has no
        # ``create_state`` — mirrors the upstream behavior; confirm callers
        # always use stateful schedulers.
        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """Write the scheduler configuration to ``save_directory``."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes this instance can be swapped with."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible class names against the top-level package namespace.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    """Broadcast ``x`` to ``shape``, aligning its axes with the *leading* axes.

    NumPy/JAX broadcasting aligns from the right; appending trailing singleton
    axes first makes the existing axes of ``x`` match the front of ``shape``.
    Renamed so the call in ``get_sqrt_alpha_prod`` resolves.
    """
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : List[Any]=0.9_99 , UpperCamelCase__ : List[str]=jnp.floataa ):
def alpha_bar(UpperCamelCase__ : Optional[Any] ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
_A : Tuple = []
for i in range(UpperCamelCase__ ):
_A : List[Any] = i / num_diffusion_timesteps
_A : Optional[int] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(UpperCamelCase__ ) / alpha_bar(UpperCamelCase__ ) , UpperCamelCase__ ) )
return jnp.array(UpperCamelCase__ , dtype=UpperCamelCase__ )
@flax.struct.dataclass
class CommonSchedulerState:
    """Precomputed diffusion quantities shared by the Flax schedulers.

    Renamed from the scrambled duplicate so the ``CommonSchedulerState``
    annotations on the helper functions below resolve; the ``= 42`` field
    placeholders are restored as jnp arrays.
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """Build the state from a scheduler's config: beta schedule -> alphas_cumprod."""
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}")

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    """Return ``sqrt(alpha_bar_t)`` and ``sqrt(1 - alpha_bar_t)`` broadcast to
    ``original_samples.shape``.

    ``noise`` is accepted for signature symmetry with the callers but unused
    here.  Renamed so ``add_noise_common``/``get_velocity_common`` resolve.
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state, original_samples, noise, timesteps):
    """Forward-diffuse: ``x_t = sqrt(a_bar) * x_0 + sqrt(1 - a_bar) * noise``."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state, sample, noise, timesteps):
    """v-prediction target: ``v = sqrt(a_bar) * noise - sqrt(1 - a_bar) * sample``."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 503
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Import structure consumed by ``_LazyModule``: configs and the tokenizer are
# always importable; modeling objects are added only when torch is available.
# Fix: the structure was previously assigned to a scrambled name while
# ``_import_structure`` (referenced below) was never defined, and the lazy
# module was never installed into ``sys.modules``.
_import_structure = {
    'configuration_jukebox': [
        'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'JukeboxConfig',
        'JukeboxPriorConfig',
        'JukeboxVQVAEConfig',
    ],
    'tokenization_jukebox': ['JukeboxTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_jukebox'] = [
        'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
        'JukeboxModel',
        'JukeboxPreTrainedModel',
        'JukeboxVQVAE',
        'JukeboxPrior',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1
|
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, psi: int, gamma: int) -> np.ndarray:
    """Build a ``ksize`` x ``ksize`` Gabor filter kernel.

    :param ksize: kernel size (bumped to the next odd number when even, so the
        kernel has a center pixel)
    :param sigma: standard deviation of the gaussian envelope
    :param theta: orientation of the filter, in degrees
    :param lambd: wavelength of the sinusoidal factor
    :param psi: phase offset
    :param gamma: spatial aspect ratio

    Fixes vs. scrambled original: distinct parameter names (all six were
    identical — a SyntaxError), loop bounds over ``ksize``, and each computed
    value is actually stored into the kernel.
    """
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel: gaussian envelope times sinusoidal carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image (fix: results were previously bound to throwaway
    # names, leaving ``gray``/``out`` undefined below)
    img = imread("""../image_data/lena.jpg""")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_aa = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel_aa)
    # normalize to the displayable 0-255 range (fix: ``np.uinta`` is not a
    # real dtype — use uint8)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("""Original""", gray)
    imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
    waitKey(0)
| 704
|
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    """Minimal settings container for the MarkupLM feature-extraction tests.

    Renamed from the scrambled ``_a`` so the test class's ``setUp`` reference
    resolves; methods renamed to match their call sites.
    """

    def __init__(self, parent):
        # Keep a handle to the owning test case (mirrors other HF testers).
        self.parent = parent

    def prepare_feat_extract_dict(self):
        """The feature extractor takes no configuration, hence an empty dict."""
        return {}
def get_html_strings():
    """Return the two fixture HTML documents used by the tests below.

    Renamed from the scrambled ``_snake_case`` so the ``get_html_strings()``
    calls in the test class resolve; the second local previously shadowed the
    first, making the function return the same document twice.
    """
    html_string_a = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
    html_string_b = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
    return [html_string_a, html_string_b]
@require_bsa
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    """Tests MarkupLM's HTML -> (nodes, xpaths) feature extraction.

    Fixes vs. scrambled original: undefined mixin base, the extractor class
    attribute was not named ``feature_extraction_class`` although ``test_call``
    reads it, and ``setUp`` assigned the tester to a throwaway local instead of
    ``self.feature_extract_tester``.
    """

    # ``None`` when bs4 is unavailable; the mixin then skips its tests.
    feature_extraction_class = MarkupLMFeatureExtractor if is_bsa_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feature_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        """Unbatched and batched extraction produce the expected nodes and xpaths."""
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 618
| 0
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    """Iterable dataset that yields 0, 1, 2, ... and stops at a random point.

    After each yielded item the stream ends with probability ``p_stop``; it
    always ends after ``max_length`` items.  Fixes vs. scrambled original:
    undefined base class ``lowercase`` (should be ``IterableDataset``) and
    duplicate parameter names in ``__init__`` (a SyntaxError).
    """

    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            # Decide termination *after* yielding, so at least one item is produced.
            stop = random.random() < self.p_stop
class UpperCAmelCase_ ( unittest.TestCase ):
    """Tests for accelerate's sharding helpers (BatchSamplerShard,
    IterableDatasetShard) and skip utilities (SkipBatchSampler, SkipDataLoader,
    skip_first_batches, DataLoaderShard, DataLoaderDispatcher).

    NOTE(review): identifiers are machine-mangled.  Results are bound to a
    throwaway name (``UpperCamelCase``) and call sites pass
    ``SCREAMING_SNAKE_CASE_`` — which is also used as a duplicate parameter
    name, a SyntaxError in Python — so this class cannot run as written.
    Code is preserved token-for-token (reindented); only comments were added.
    """

    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True ) -> Optional[int]:
        # Helper: build one BatchSamplerShard per (fake) process, materialize
        # each shard, and compare against the expected per-shard batches.  When
        # batches are not split, also compare the per-shard lengths.
        UpperCamelCase :Optional[int] = [
            BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
            for i in range(2 )
        ]
        UpperCamelCase :List[Any] = [list(SCREAMING_SNAKE_CASE_ ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(SCREAMING_SNAKE_CASE_ ) for shard in batch_sampler_shards] , [len(SCREAMING_SNAKE_CASE_ ) for e in expected] )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase ( self ) -> Optional[int]:
        # Default sharding: whole batches dealt round-robin to 2 processes,
        # padding by wrapping around when needed (even_batches=True).
        # Check the shards when the dataset is a round multiple of total batch size.
        UpperCamelCase :int = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :int = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :List[str] = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        # Expected shouldn't change
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        UpperCamelCase :Union[str, Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :str = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Optional[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Any = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        UpperCamelCase :Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Dict = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Optional[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Dict = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        UpperCamelCase :Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :List[str] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :List[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Optional[int] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is very small.
        UpperCamelCase :int = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :List[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Tuple = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :int = [[], []]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase ( self ) -> Optional[int]:
        # split_batches=True: each global batch of 4 is split into two halves of 2,
        # one half per process.
        # Check the shards when the dataset is a round multiple of batch size.
        UpperCamelCase :List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Any = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :int = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        # Expected shouldn't change
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size.
        UpperCamelCase :Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Any = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :str = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Optional[int] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        UpperCamelCase :Optional[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Any = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :List[str] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is very small.
        UpperCamelCase :Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Any = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :List[Any] = [[], []]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase ( self ) -> List[Any]:
        # even_batches=False: no wrap-around padding, shards may end up uneven.
        # Check the shards when the dataset is a round multiple of total batch size.
        UpperCamelCase :Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Optional[int] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        # Expected shouldn't change
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        UpperCamelCase :int = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :int = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Any = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        UpperCamelCase :Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :str = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :int = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        UpperCamelCase :Tuple = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :str = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Optional[Any] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is very small.
        UpperCamelCase :Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Tuple = [[[0, 1]], []]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :int = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Dict = [[], []]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase ( self ) -> Dict:
        # split_batches=True together with even_batches=False.
        # Check the shards when the dataset is a round multiple of batch size.
        UpperCamelCase :Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Any = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        # Expected shouldn't change
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size.
        UpperCamelCase :Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :List[str] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Optional[int] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        UpperCamelCase :Optional[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Optional[Any] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Union[str, Any] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        # Check the shards when the dataset is very small.
        UpperCamelCase :Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Tuple = [[[0, 1]], []]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Dict = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Dict = [[], []]
        self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase ( self ) -> List[str]:
        # Ragged batches (varying sizes) dealt alternately across 2 shards.
        UpperCamelCase :Tuple = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        UpperCamelCase :Dict = [BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) for i in range(2 )]
        self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
        self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
        self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
        self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )

    def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=False ) -> List[Any]:
        # Helper: shard an iterable dataset across processes and verify that all
        # shards have equal length (a multiple of the per-shard batch size) and
        # that interleaving the shards reproduces the reference stream (with
        # wrap-around padding when drop_last is False).
        random.seed(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :Dict = list(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :List[Any] = [
            IterableDatasetShard(
                SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , drop_last=SCREAMING_SNAKE_CASE_ , num_processes=SCREAMING_SNAKE_CASE_ , process_index=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , )
            for i in range(SCREAMING_SNAKE_CASE_ )
        ]
        UpperCamelCase :Union[str, Any] = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(SCREAMING_SNAKE_CASE_ )
            iterable_dataset_lists.append(list(SCREAMING_SNAKE_CASE_ ) )
        UpperCamelCase :List[Any] = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        UpperCamelCase :Union[str, Any] = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
        self.assertTrue(len(SCREAMING_SNAKE_CASE_ ) % shard_batch_size == 0 )
        UpperCamelCase :Tuple = []
        for idx in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(SCREAMING_SNAKE_CASE_ ) < len(SCREAMING_SNAKE_CASE_ ):
                reference += reference
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , reference[: len(SCREAMING_SNAKE_CASE_ )] )

    def UpperCAmelCase ( self ) -> int:
        # Exercise the 4 combinations of drop_last x split_batches, plus a tiny dataset.
        UpperCamelCase :Optional[Any] = 42
        UpperCamelCase :Union[str, Any] = RandomIterableDataset()
        self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
        self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
        self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
        self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
        # Edge case with a very small dataset
        UpperCamelCase :str = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
        self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
        self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
        self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase ( self ) -> Any:
        # SkipBatchSampler drops the first 2 batches.
        UpperCamelCase :Any = BatchSampler(range(16 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase :List[Any] = SkipBatchSampler(SCREAMING_SNAKE_CASE_ , 2 )
        self.assertListEqual(list(SCREAMING_SNAKE_CASE_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )

    def UpperCAmelCase ( self ) -> Optional[int]:
        # SkipDataLoader drops the first 2 batches.
        UpperCamelCase :int = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
        self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )

    def UpperCAmelCase ( self ) -> Tuple:
        # skip_first_batches wraps an existing DataLoader.
        UpperCamelCase :int = DataLoader(list(range(16 ) ) , batch_size=4 )
        UpperCamelCase :List[str] = skip_first_batches(SCREAMING_SNAKE_CASE_ , num_batches=2 )
        self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )

    def UpperCAmelCase ( self ) -> Tuple:
        # DataLoaderShard flags end_of_dataloader only on the last batch, on every pass.
        UpperCamelCase :Union[str, Any] = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
        for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )

    def UpperCAmelCase ( self ) -> Optional[int]:
        # DataLoaderDispatcher needs an Accelerator state to be initialized first.
        Accelerator()
        UpperCamelCase :Optional[int] = DataLoaderDispatcher(range(16 ) , batch_size=4 )
        for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 658
|
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# NOTE: the search functions below read this constant by the name `precision`;
# the mangled source bound it to `__snake_case`, leaving `precision` undefined.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search for ``target`` within the half-open index range [left, right).

    Returns the index of ``target`` in ``array``, or -1 if it is not present in
    that range.  (The mangled source declared four parameters with the same
    name — a SyntaxError — and left ``array``/``target`` undefined; restored to
    match the ``lin_search(left, right, array, target)`` call sites below.)
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted ``array``.

    Returns an index of ``target``, or -1 if absent.  Once the search window is
    narrower than ``precision`` it falls back to ``lin_search`` over the
    half-open range [left, right) — which is why ``right`` starts at
    ``len(array)`` rather than ``len(array) - 1``.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            # Target, if present, lies strictly between the two probe points.
            left = one_third + 1
            right = two_third - 1
    else:
        # Window exhausted without finding the target.
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search for ``target`` in sorted ``array[left:right+1]``.

    Returns an index of ``target`` or -1 if absent; narrow windows (smaller
    than ``precision``) are delegated to ``lin_search``.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            # Recurse into the middle third.
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Read a sorted, comma-separated list and the target value from stdin.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())

    # The mangled source bound every result to one name and printed the
    # iterative result twice; report each search's result separately.
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
    else:
        print("Not found")
| 658
| 1
|
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE_ ( lowercase_ , unittest.TestCase ):
    """Test-suite for the ProphetNet tokenizer (BERT-style WordPiece vocab).

    NOTE(review): identifiers are machine-mangled — every method is named
    ``__magic_name__`` (so later defs shadow earlier ones at class-creation
    time) and locals are bound to ``lowerCamelCase__`` — which leaves names
    such as ``vocab_tokens``, ``tokenizer`` and ``input_text`` undefined at
    runtime.  Code is preserved token-for-token (reindented); comments only.
    """
    A__ = ProphetNetTokenizer
    A__ = False  # NOTE(review): rebinds the attribute above; presumably a second config flag originally
    def __magic_name__ ( self ):
        # setUp: write a tiny WordPiece vocabulary file into the test tmpdir.
        super().setUp()
        lowerCamelCase__ = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def __magic_name__ ( self , _lowerCAmelCase ):
        # get_input_output_texts: raw input and its expected normalized form.
        lowerCamelCase__ = "UNwant\u00E9d,running"
        lowerCamelCase__ = "unwanted, running"
        return input_text, output_text
    def __magic_name__ ( self ):
        # Full tokenizer: tokenize, then map tokens to vocabulary ids.
        lowerCamelCase__ = self.tokenizer_class(self.vocab_file )
        lowerCamelCase__ = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(_lowerCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [9, 6, 7, 12, 10, 11] )
    def __magic_name__ ( self ):
        # BasicTokenizer splits CJK characters into individual tokens.
        lowerCamelCase__ = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
    def __magic_name__ ( self ):
        # Lower-casing enabled: accents folded, case dropped.
        lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  " ) , ["hello", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def __magic_name__ ( self ):
        # Lower-casing with strip_accents explicitly disabled: accents kept.
        lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["hällo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
    def __magic_name__ ( self ):
        # Lower-casing with strip_accents enabled: accents removed.
        lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def __magic_name__ ( self ):
        # Lower-casing default accent behavior.
        lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
    def __magic_name__ ( self ):
        # No lower-casing: case preserved.
        lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
    def __magic_name__ ( self ):
        # No lower-casing, strip_accents disabled.
        lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
    def __magic_name__ ( self ):
        # No lower-casing, strip_accents enabled.
        lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
    def __magic_name__ ( self ):
        # never_split keeps the listed tokens intact.
        lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase , never_split=["[UNK]"] )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
    def __magic_name__ ( self ):
        # WordpieceTokenizer: greedy longest-match-first subword splitting.
        lowerCamelCase__ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        lowerCamelCase__ = {}
        for i, token in enumerate(_lowerCAmelCase ):
            lowerCamelCase__ = i
        lowerCamelCase__ = WordpieceTokenizer(vocab=_lowerCAmelCase , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
        self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
    @require_torch
    def __magic_name__ ( self ):
        # Batched encoding with the pretrained checkpoint returns padded tensors.
        lowerCamelCase__ = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
        lowerCamelCase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        lowerCamelCase__ = [1037, 2146, 2_0423, 2005, 7680, 7849, 3989, 1012, 102]
        lowerCamelCase__ = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
        self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
        lowerCamelCase__ = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
        self.assertEqual((2, 9) , batch.input_ids.shape )
        self.assertEqual((2, 9) , batch.attention_mask.shape )
    def __magic_name__ ( self ):
        # Character-class helper: whitespace detection.
        self.assertTrue(_is_whitespace(" " ) )
        self.assertTrue(_is_whitespace("\t" ) )
        self.assertTrue(_is_whitespace("\r" ) )
        self.assertTrue(_is_whitespace("\n" ) )
        self.assertTrue(_is_whitespace("\u00A0" ) )
        self.assertFalse(_is_whitespace("A" ) )
        self.assertFalse(_is_whitespace("-" ) )
    def __magic_name__ ( self ):
        # Character-class helper: control-character detection.
        self.assertTrue(_is_control("\u0005" ) )
        self.assertFalse(_is_control("A" ) )
        self.assertFalse(_is_control(" " ) )
        self.assertFalse(_is_control("\t" ) )
        self.assertFalse(_is_control("\r" ) )
    def __magic_name__ ( self ):
        # Character-class helper: punctuation detection.
        self.assertTrue(_is_punctuation("-" ) )
        self.assertTrue(_is_punctuation("$" ) )
        self.assertTrue(_is_punctuation("`" ) )
        self.assertTrue(_is_punctuation("." ) )
        self.assertFalse(_is_punctuation("A" ) )
        self.assertFalse(_is_punctuation(" " ) )
    @slow
    def __magic_name__ ( self ):
        # build_inputs_with_special_tokens appends [SEP] (id 102) after each sequence.
        lowerCamelCase__ = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
        lowerCamelCase__ = tokenizer.encode("sequence builders" , add_special_tokens=_lowerCAmelCase )
        lowerCamelCase__ = tokenizer.encode("multi-sequence build" , add_special_tokens=_lowerCAmelCase )
        lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
        lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_a + [102]
| 360
|
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity |A ∩ B| / |A ∪ B| of two collections.

    Supports two ``set`` arguments or two ``list``/``tuple`` arguments (order
    preserved for sequences).  With ``alternative_union=True`` the denominator
    is ``len(A) + len(B)`` instead of the true union size.  Returns ``None``
    for unsupported argument types.

    NOTE(review): the mangled source declared three parameters all named ``a``
    (a SyntaxError); restored to match the ``jaccard_similarity(set_a, set_b)``
    call site below.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # Union as a list: A followed by the elements of B not already in A.
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    # Demo: the mangled source bound both sets to one name (`A_`) and then
    # referenced undefined `set_a`/`set_b`; bind them properly.
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
| 1
|
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    """Convert an original CompVis latent-diffusion checkpoint into a diffusers
    ``LDMPipeline`` (VQModel + UNetLDMModel + DDIMScheduler) saved at
    ``output_path``.

    NOTE(review): the mangled source declared three identically named
    parameters (a SyntaxError) and bound every intermediate to one throwaway
    name, losing the per-submodel state dicts; reconstructed below and renamed
    to match the ``convert_ldm_original(...)`` call site.
    """
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        # The mangled source passed an undefined name here; False matches the
        # usual conversion-script setting -- TODO confirm against upstream.
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    # The mangled source bound the parser and parsed args to `__A` while the
    # following lines referenced `parser`/`args`; bind them properly.
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 130
|
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> str:
'''simple docstring'''
UpperCAmelCase = []
for line in lines:
UpperCAmelCase = re.sub(R'''#.*''' , '''''' , UpperCamelCase__ ) # remove comments
if line:
filtered_lines.append(UpperCamelCase__ )
UpperCAmelCase = '''\n'''.join(UpperCamelCase__ )
# Make a hash from all this code
UpperCAmelCase = full_str.encode('''utf-8''' )
return shaaaa(UpperCamelCase__ ).hexdigest()
# get importable module names and hash for caching
# NOTE(review): the mangled source bound these registries to throwaway `__A`
# names while the statements below read `_EXTENSION_TO_MODULE` /
# `_MODULE_TO_EXTENSIONS`; names restored (registry names follow the upstream
# `datasets` package -- TODO confirm).
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

# Folder-based builders that support metadata files.
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 130
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    """Raised while iterating a linked list whose `next_node` chain contains a cycle."""


class Node:
    """Singly linked list node able to detect whether its chain contains a loop."""

    def __init__(self, data: Any) -> None:
        self.data = data
        # Successor node; None marks the end of the list.
        self.next_node: Node | None = None

    def __iter__(self):
        """Yield node data in order; raise ContainsLoopError if a node repeats."""
        node: Node | None = self
        visited: list[Node] = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True iff following `next_node` pointers eventually revisits a node."""
        try:
            list(self)  # Full traversal raises ContainsLoopError on a cycle.
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    # Build 1 -> 2 -> 3 -> 4 (no loop), then close the cycle 4 -> 2.
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    # Repeated *values* (5, 6, 5, 6) are not a loop — only repeated nodes are.
    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    # A single node never loops.
    root_node = Node(1)
    print(root_node.has_loop)  # False
| 705
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class snake_case(ProcessorMixin):
    """SpeechT5 processor wrapping a feature extractor (audio) and a tokenizer (text)
    into a single object, dispatching to one or the other per input kwarg."""

    # Attribute names required by ProcessorMixin to resolve the sub-components.
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Process `audio`/`text` inputs and/or `audio_target`/`text_target` labels.

        Returns the processed inputs with `labels` (and `decoder_attention_mask`)
        merged in when targets are given; returns only the targets when no input
        side is provided.
        """
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        # Exactly one modality per side (input / target) may be provided.
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Pad `input_values`/`input_ids` inputs and/or `labels`, merging padded
        labels (and `decoder_attention_mask`) into the returned inputs."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            # Token labels go through the tokenizer; spectrogram labels through the
            # feature extractor (temporarily resized to num_mel_bins channels).
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
| 401
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: names are only resolved on first attribute access.
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only importable when torch is installed.
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 52
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase(SchedulerCommonTest):
    """Test suite for `DDPMScheduler`: config handling, variance values, full
    denoising loops, and custom-timesteps validation."""

    # Read by SchedulerCommonTest helpers (e.g. check_over_configs).
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Default DDPM config; individual fields can be overridden via kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Spot-check the fixed_small variance at the loop boundaries and mid-way.
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            # Last entry maps to -1; otherwise the next element in the list.
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 52
| 1
|
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
# Prefer GPU when available; callers can still override via --device.
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples,
    out_file,
    model_name,
    batch_size=8,
    device=DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
):
    """Run model.generate over `examples` in chunks, writing one hypothesis per
    line to `out_file`; return {n_obs, runtime, seconds_per_sample}."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    """Current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """CLI entry point: parse args, run generation, optionally score and save metrics.

    Returns the metrics dict (empty when no --reference_path is given).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    # T5-style models expect a leading space before the source text.
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
| 702
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter-name prefix -> HF module path ('*' is the layer index).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}

# HF destinations that live at the top level of the model (no model-name prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign checkpoint tensor `value` to the attribute described by dotted `key`
    on `hf_pointer`, optionally into the sub-attribute named by `weight_type`."""
    # Walk down the dotted attribute path to the target module/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}"""
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every tensor of the fairseq checkpoint into the matching HF module,
    logging any checkpoint entry that could not be mapped."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # NOTE(review): attribute name kept from this file's conventions — confirm it
    # matches the converted model class's backbone attribute.
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched key in the
                        # fairseq parameter name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor.

    `full_name` looks like '...conv_layers.<layer_id>.<type_id>....'; type_id 0 is
    the conv itself, type_id 2 its (layer/group) norm.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq wav2vec2-conformer checkpoint to the HF format and save it."""
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
lowerCamelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 14
| 0
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Check whether `module` is a torch.compile() OptimizedModule wrapper."""
    # torch._dynamo only exists on torch >= 2.0.
    if is_torch_version('<', '2.0.0') or not hasattr(torch, '_dynamo'):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Unwrap `model` from DDP/DataParallel (and DeepSpeed/compile) wrappers.

    When `keep_fp32_wrapper` is False, also strips the mixed-precision forward
    wrapper and undoes a transformer-engine conversion.
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, 'forward')
        original_forward = model.__dict__.pop('_original_forward', None)
        if original_forward is not None:
            # Peel decorator layers until we reach the original forward.
            while hasattr(forward, '__wrapped__'):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, '_converted_to_transformer_engine', False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        # Re-attach the unwrapped model to the compiled wrapper.
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def a_ ( ) -> None:
    """Block until every process in the current distributed setup reaches this point."""
    PartialState().wait_for_everyone()
# NOTE(review): the two parameters share one mangled name — a SyntaxError
# as written. Automated-renaming damage; restore distinct names.
def a_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
    """Save an object to disk exactly once per node.

    On TPU the XLA-aware ``xm.save`` is used; otherwise only the local main
    process (index 0) writes, so other ranks skip the save.
    """
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(__UpperCAmelCase , __UpperCAmelCase )
    elif PartialState().local_process_index == 0:
        torch.save(__UpperCAmelCase , __UpperCAmelCase )
@contextmanager
def a_ ( **kwargs ):
    """Temporarily export upper-cased environment variables.

    Each keyword argument is exported as ``KEY.upper() -> str(value)`` for
    the duration of the ``with`` block and removed again on exit.

    NOTE: a pre-existing value of the same variable is not restored on
    exit — it is simply deleted.

    Bug fix: the original assigned ``str(value)`` to a throwaway local
    instead of writing it into ``os.environ``, so nothing was ever patched;
    it also read ``kwargs`` while the parameter had a different name.
    """
    for key, value in kwargs.items():
        # Environment variables must be strings.
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def a_ ( __UpperCAmelCase ) -> str:
    """Return a human-readable name for an object.

    Prefers ``__qualname__``, then ``__name__``; for plain instances falls
    back to their class, and finally to ``str(obj)``.

    Bug fix: the original body read an undefined local ``obj`` instead of
    its parameter; the parameter is now bound to ``obj`` once up front.
    """
    obj = __UpperCAmelCase
    if not hasattr(obj , '__qualname__' ) and not hasattr(obj , '__name__' ):
        # Instances usually carry neither attribute — use their class.
        obj = getattr(obj , '__class__' , obj )
    if hasattr(obj , '__qualname__' ):
        return obj.__qualname__
    if hasattr(obj , '__name__' ):
        return obj.__name__
    return str(obj )
def a_ ( source , destination ) -> dict:
    """Recursively merge ``source`` into ``destination`` in place.

    Nested dicts are merged key by key; non-dict values from ``source``
    overwrite the corresponding keys in ``destination``. Returns the
    (mutated) ``destination``.

    Bug fix: the original declared both parameters with one duplicate
    mangled name (a SyntaxError) while the body read ``source`` and
    ``destination``, and recursed into a nonexistent ``merge_dicts``.
    """
    for key, value in source.items():
        if isinstance(value , dict ):
            # Merge nested mappings instead of replacing them wholesale.
            node = destination.setdefault(key , {} )
            a_(value , node )
        else:
            destination[key] = value
    return destination
def a_ ( __UpperCAmelCase = None ) -> bool:
    """Return True if something is accepting TCP connections on ``localhost:port``.

    Defaults to port 29500 (the usual torch distributed rendezvous port)
    when no port is given.

    Bug fix: the original read an undefined local ``port`` instead of its
    parameter; the parameter is now bound to ``port`` once up front.
    """
    port = __UpperCAmelCase
    if port is None:
        port = 2_95_00
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        # connect_ex returns 0 on a successful connection.
        return s.connect_ex(('localhost', port) ) == 0
| 350
|
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def a_ ( __UpperCAmelCase ) -> dict:
    """Load a T5X checkpoint from the given path and return its parameters
    as a single flat ``{joined_key: array}`` dict.

    Bug fix: the original assigned both intermediates to throwaway mangled
    names and then returned an undefined ``flax_params``.
    """
    flax_params = checkpoints.load_tax_checkpoint(__UpperCAmelCase )
    # Flatten the nested parameter pytree into flat tuple-keyed entries.
    return flatten_dict(flax_params )
def a_ ( __UpperCAmelCase ) -> Optional[Any]:
    """Rename T5X/Flax parameter keys to the HF Pix2Struct naming scheme and
    convert the arrays to ``torch.Tensor``.

    NOTE(review): assignment targets throughout this function carry the
    placeholder name ``snake_case`` while the code reads the intended names
    (``converted_dict``, ``CONVERSION_MAPPING``, ``DECODER_CONVERSION_MAPPING``,
    ``flax_dict``, ``new_key`` ...) — automated-renaming damage; the function
    cannot run as written. TODO restore the original binding names.
    """
    snake_case: int ={}
    # Encoder-side key substitutions.
    snake_case: Any ={
        'token_embedder': 'embeddings',
        'encoder_norm': 'layernorm',
        'kernel': 'weight',
        '.out': '.output',
        'scale': 'weight',
        'embedders_0.pos_embedding': 'row_embedder.weight',
        'embedders_1.pos_embedding': 'column_embedder.weight',
    }
    # Extra substitutions applied only to decoder keys.
    snake_case: Union[str, Any] ={
        'query': 'attention.query',
        'key': 'attention.key',
        'value': 'attention.value',
        'output.dense': 'output',
        'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
        'pre_self_attention_layer_norm': 'self_attention.layer_norm',
        'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
        'mlp.': 'mlp.DenseReluDense.',
        'pre_mlp_layer_norm': 'mlp.layer_norm',
        'self_attention.o': 'self_attention.attention.o',
        'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
        'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
        'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.logits_dense.weight': 'decoder.lm_head.weight',
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            snake_case: Dict ='.'.join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                snake_case: Union[str, Any] =new_key.replace(__UpperCAmelCase , __UpperCAmelCase )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    snake_case: Optional[Any] =new_key.replace(__UpperCAmelCase , __UpperCAmelCase )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                snake_case: Any =re.sub(R'layers_(\d+)' , R'layer.\1' , __UpperCAmelCase )
                snake_case: Optional[int] =new_key.replace('encoder' , 'encoder.encoder' )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                snake_case: Optional[Any] =re.sub(R'layers_(\d+)' , R'layer.\1' , __UpperCAmelCase )
            snake_case: Optional[int] =flax_dict[key]
    snake_case: Optional[int] ={}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            # Dense kernels are stored transposed relative to torch Linear.
            snake_case: Union[str, Any] =torch.from_numpy(converted_dict[key].T )
        else:
            snake_case: Union[str, Any] =torch.from_numpy(converted_dict[key] )
    return converted_torch_dict
# NOTE(review): all four parameters share one mangled name — a SyntaxError
# as written — and assignment targets are the placeholder ``snake_case``
# while the code reads ``encoder_config``, ``decoder_config``, ``model``,
# ``processor`` ... Automated-renaming damage; restore original names.
def a_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Tuple:
    """Convert a T5X Pix2Struct checkpoint to a HF model + processor and
    save both under the given dump folder."""
    snake_case: Any =get_flax_param(__UpperCAmelCase )
    if not use_large:
        snake_case: Optional[int] =PixaStructVisionConfig()
        snake_case: int =PixaStructTextConfig()
    else:
        # "large" variant hyper-parameters.
        snake_case: Tuple =PixaStructVisionConfig(
            hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
        snake_case: List[Any] =PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
    snake_case: Any =PixaStructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__UpperCAmelCase )
    snake_case: List[Any] =PixaStructForConditionalGeneration(__UpperCAmelCase )
    snake_case: str =rename_and_convert_flax_params(__UpperCAmelCase )
    model.load_state_dict(__UpperCAmelCase )
    snake_case: List[Any] =AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
    snake_case: Optional[Any] =PixaStructImageProcessor()
    snake_case: Optional[int] =PixaStructProcessor(image_processor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
    if use_large:
        # Large checkpoints use a longer patch sequence and VQA formatting.
        snake_case: Optional[Any] =40_96
        snake_case: str =True
    # mkdir if needed
    os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
    model.save_pretrained(__UpperCAmelCase )
    processor.save_pretrained(__UpperCAmelCase )
    print('Model saved in {}'.format(__UpperCAmelCase ) )
if __name__ == "__main__":
    # NOTE(review): bindings here were mangled — the parser is assigned to
    # ``a`` but used as ``parser``; the parsed args are assigned to ``a``
    # but read as ``args``; ``args.tax_checkpoint_path`` does not match the
    # declared ``--t5x_checkpoint_path`` flag; and the called function name
    # does not exist in this file (the converter above is named ``a_``).
    # TODO restore the original names before running this script.
    a = argparse.ArgumentParser()
    parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--use_large', action='store_true', help='Use large model.')
    parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
    a = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 350
| 1
|
"""simple docstring"""
import random
def __A ( a_ :Union[str, Any] , a_ :List[Any] , a_ :List[Any]) -> Union[str, Any]:
__a : str = a[left_index]
__a : Optional[int] = left_index + 1
for j in range(left_index + 1 , a_):
if a[j] < pivot:
__a , __a : List[str] = a[i], a[j]
i += 1
__a , __a : Union[str, Any] = a[i - 1], a[left_index]
return i - 1
def __A ( a_ :List[Any] , a_ :List[Any] , a_ :List[Any]) -> str:
if left < right:
__a : Optional[Any] = random.randint(a_ , right - 1)
__a , __a : List[str] = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
__a : str = partition(a_ , a_ , a_)
quick_sort_random(
a_ , a_ , a_) # recursive quicksort to the left of the pivot point
quick_sort_random(
a_ , pivot_index + 1 , a_) # recursive quicksort to the right of the pivot point
def __A ( ) -> Optional[int]:
    """Read comma-separated integers from stdin, sort them, print the result.

    NOTE(review): bindings were mangled — locals are assigned to ``__a``
    while the code reads ``user_input``/``a``; the comprehension converts
    ``a_`` instead of ``item``; and the sorter/entry names referenced here
    (``quick_sort_random``, ``main``) do not exist in this file. TODO
    restore the original identifiers before running.
    """
    __a : int = input('''Enter numbers separated by a comma:\n''').strip()
    __a : Dict = [int(a_) for item in user_input.split(''',''')]
    quick_sort_random(a_ , 0 , len(a_))
    print(a_)
if __name__ == "__main__":
    main()
| 101
|
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
    """Tokenization tests for OpenAI GPT (slow and fast tokenizers).

    NOTE(review): the four class attributes below all share the single
    mangled name ``__lowerCAmelCase`` — only the last binding survives —
    and every method is named ``_lowerCamelCase``, so later definitions
    shadow earlier ones. Automated-renaming damage; TODO restore the
    original identifiers (tokenizer_class, rust_tokenizer_class, setUp, ...).
    """
    __lowerCAmelCase = OpenAIGPTTokenizer
    __lowerCAmelCase = OpenAIGPTTokenizerFast
    __lowerCAmelCase = True
    __lowerCAmelCase = False
    def _lowerCamelCase ( self ):
        # Write a tiny BPE vocab + merges fixture into tmpdirname.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __a : List[str] = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        __a : str = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
        __a : List[Any] = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
        __a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        __a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(_UpperCAmelCase ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(_UpperCAmelCase ) )
    def _lowerCamelCase ( self , _UpperCAmelCase ):
        # Input/expected-output pair used by the common tokenizer tests.
        return "lower newer", "lower newer"
    def _lowerCamelCase ( self ):
        # Tokenize against the fixture vocab and check tokens and ids.
        __a : Tuple = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        __a : Tuple = '''lower'''
        __a : Union[str, Any] = ['''low''', '''er</w>''']
        __a : str = tokenizer.tokenize(_UpperCAmelCase )
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        __a : List[Any] = tokens + ['''<unk>''']
        __a : Tuple = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
    def _lowerCamelCase ( self , _UpperCAmelCase=15 ):
        # padding="max_length" must raise for a tokenizer without a pad token.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                __a : Any = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
                # Simple input
                __a : str = '''This is a simple input'''
                __a : Dict = ['''This is a simple input 1''', '''This is a simple input 2''']
                __a : List[Any] = ('''This is a simple input''', '''This is a pair''')
                __a : Optional[Any] = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' )
                # Simple input
                self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' , )
                # Pair input
                self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' )
                # Pair input
                self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' , )
    def _lowerCamelCase ( self ):
        # Intentionally disabled common test.
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class __lowercase ( _UpperCamelCase ):
    """Variant of the suite above that runs only when ftfy and spacy are
    installed (exercises the ftfy-based text-cleaning path); inherits all
    tests unchanged."""
    pass
| 101
| 1
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__A ='<<<<<<< This should probably be modified because it mentions: '
__A ='=======\n>>>>>>>\n'
__A =[
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
__A =[
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def _UpperCamelCase ( UpperCamelCase__ ):
    """Factory for argparse's ``set_defaults(func=...)``: build a
    ``ConvertCommand`` from the parsed CLI namespace.

    Bug fix: the original read an undefined global ``args`` instead of its
    parameter.
    """
    return ConvertCommand(UpperCamelCase__.tfds_path , UpperCamelCase__.datasets_directory )
class _snake_case ( a__ ):
    """``datasets-cli convert``: rewrite a TensorFlow-Datasets dataset
    script into a HuggingFace Datasets script via line-wise substitution.

    NOTE(review): assignment targets throughout this class carry the
    placeholder name ``UpperCAmelCase__`` while the code reads the intended
    names (``train_parser``, ``file_names``, ``out_line``, ``match`` ...);
    automated-renaming damage — the class cannot run as written. TODO
    restore the original binding names.
    """
    @staticmethod
    def snake_case__ ( _lowerCamelCase):
        # Register the `convert` sub-command and its two required flags.
        UpperCAmelCase__ : List[str] = parser.add_parser(
            """convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
        train_parser.add_argument(
            """--tfds_path""" , type=_lowerCamelCase , required=_lowerCamelCase , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
        train_parser.add_argument(
            """--datasets_directory""" , type=_lowerCamelCase , required=_lowerCamelCase , help="""Path to the HuggingFace Datasets folder.""")
        train_parser.set_defaults(func=_lowerCamelCase)
    # NOTE(review): the duplicate parameter names below are a SyntaxError as written.
    def __init__( self , _lowerCamelCase , _lowerCamelCase , *_lowerCamelCase):
        UpperCAmelCase__ : Optional[Any] = get_logger("""datasets-cli/converting""")
        UpperCAmelCase__ : Any = tfds_path
        UpperCAmelCase__ : Any = datasets_directory
    def snake_case__ ( self):
        # Resolve input: a directory of scripts or a single script file.
        if os.path.isdir(self._tfds_path):
            UpperCAmelCase__ : Dict = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            UpperCAmelCase__ : str = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""")
        UpperCAmelCase__ : List[Any] = os.path.abspath(self._datasets_directory)
        self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''')
        UpperCAmelCase__ : Optional[int] = []
        UpperCAmelCase__ : Any = []
        UpperCAmelCase__ : Optional[Any] = {}
        if os.path.isdir(self._tfds_path):
            UpperCAmelCase__ : Dict = os.listdir(_lowerCamelCase)
        else:
            UpperCAmelCase__ : List[Any] = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f'''Looking at file {f_name}''')
            UpperCAmelCase__ : str = os.path.join(_lowerCamelCase , _lowerCamelCase)
            UpperCAmelCase__ : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase)
            if not os.path.isfile(_lowerCamelCase) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("""Skipping file""")
                continue
            with open(_lowerCamelCase , encoding="""utf-8""") as f:
                UpperCAmelCase__ : Optional[Any] = f.readlines()
            UpperCAmelCase__ : Optional[Any] = []
            UpperCAmelCase__ : Optional[Any] = False
            UpperCAmelCase__ : Dict = False
            UpperCAmelCase__ : List[Any] = []
            for line in lines:
                UpperCAmelCase__ : Optional[Any] = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    UpperCAmelCase__ : str = """import datasets\n"""
                elif "import tensorflow" in out_line:
                    # order is important here
                    UpperCAmelCase__ : Optional[Any] = """"""
                    continue
                elif "from absl import logging" in out_line:
                    UpperCAmelCase__ : List[Any] = """from datasets import logging\n"""
                elif "getLogger" in out_line:
                    UpperCAmelCase__ : Optional[int] = out_line.replace("""getLogger""" , """get_logger""")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    # Line mentions an untranslatable TFDS feature: keep it,
                    # but flag it with conflict-style markers for manual review.
                    UpperCAmelCase__ : List[Any] = True
                    UpperCAmelCase__ : List[Any] = list(filter(lambda _lowerCamelCase: e in out_line , _lowerCamelCase))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCamelCase) + """\n""")
                    out_lines.append(_lowerCamelCase)
                    out_lines.append(_lowerCamelCase)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        UpperCAmelCase__ : Optional[Any] = re.sub(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
                    # Take care of saving utilities (to later move them together with main script)
                    if "tensorflow_datasets" in out_line:
                        UpperCAmelCase__ : List[str] = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , _lowerCamelCase)
                        tfds_imports.extend(imp.strip() for imp in match.group(1).split(""","""))
                        UpperCAmelCase__ : Dict = """from . import """ + match.group(1)
                    # Check we have not forget anything
                    if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                        raise ValueError(f'''Error converting {out_line.strip()}''')
                    if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                        UpperCAmelCase__ : int = True
                    out_lines.append(_lowerCamelCase)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                UpperCAmelCase__ : Optional[Any] = f_name.replace(""".py""" , """""")
                UpperCAmelCase__ : Union[str, Any] = os.path.join(_lowerCamelCase , _lowerCamelCase)
                UpperCAmelCase__ : int = os.path.join(_lowerCamelCase , _lowerCamelCase)
                os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase)
                self._logger.info(f'''Adding directory {output_dir}''')
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(_lowerCamelCase)
            if needs_manual_update:
                with_manual_update.append(_lowerCamelCase)
            with open(_lowerCamelCase , """w""" , encoding="""utf-8""") as f:
                f.writelines(_lowerCamelCase)
            self._logger.info(f'''Converted in {output_file}''')
        for utils_file in utils_files:
            try:
                UpperCAmelCase__ : Optional[int] = os.path.basename(_lowerCamelCase)
                UpperCAmelCase__ : int = imports_to_builder_map[f_name.replace(""".py""" , """""")]
                self._logger.info(f'''Moving {dest_folder} to {utils_file}''')
                shutil.copy(_lowerCamelCase , _lowerCamelCase)
            except KeyError:
                self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''')
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''')
| 407
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A =get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _snake_case ( a__ , unittest.TestCase ):
    """Tokenization tests for XLNet (slow + fast) on the shared
    SentencePiece test fixture.

    NOTE(review): every method is named ``snake_case__`` (later defs shadow
    earlier ones) and most assignment targets were mangled to
    ``UpperCAmelCase__`` while the code reads the intended names
    (``tokenizer``, ``vocab_keys``, ``text`` ...). Automated-renaming
    damage; TODO restore original identifiers.
    """
    lowerCAmelCase :List[str] = XLNetTokenizer
    lowerCAmelCase :Union[str, Any] = XLNetTokenizerFast
    lowerCAmelCase :Union[str, Any] = True
    lowerCAmelCase :int = True
    def snake_case__ ( self):
        # Save a fixture tokenizer so from_pretrained works in common tests.
        super().setUp()
        # We have a SentencePiece fixture for testing
        UpperCAmelCase__ : Optional[Any] = XLNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def snake_case__ ( self):
        # "<s>" must map to id 1 and back.
        UpperCAmelCase__ : Optional[int] = """<s>"""
        UpperCAmelCase__ : List[Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase) , _lowerCamelCase)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase) , _lowerCamelCase)
    def snake_case__ ( self):
        # Sanity-check vocab layout and size.
        UpperCAmelCase__ : int = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , """<unk>""")
        self.assertEqual(vocab_keys[1] , """<s>""")
        self.assertEqual(vocab_keys[-1] , """<eod>""")
        self.assertEqual(len(_lowerCamelCase) , 1006)
    def snake_case__ ( self):
        self.assertEqual(self.get_tokenizer().vocab_size , 1000)
    def snake_case__ ( self):
        # Tokenize/convert round-trips with accents kept.
        UpperCAmelCase__ : int = XLNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase)
        UpperCAmelCase__ : str = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(_lowerCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase) , [285, 46, 10, 170, 382])
        UpperCAmelCase__ : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            _lowerCamelCase , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        UpperCAmelCase__ : List[str] = tokenizer.convert_tokens_to_ids(_lowerCamelCase)
        self.assertListEqual(_lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        UpperCAmelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens(_lowerCamelCase)
        self.assertListEqual(
            _lowerCamelCase , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ] , )
    def snake_case__ ( self):
        # Lower-casing path: accents folded, leading lowercase "i".
        UpperCAmelCase__ : Union[str, Any] = XLNetTokenizer(_lowerCamelCase , do_lower_case=_lowerCamelCase)
        UpperCAmelCase__ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            _lowerCamelCase , [
                SPIECE_UNDERLINE + """""",
                """i""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """se""",
                """.""",
            ] , )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""▁he""", """ll""", """o"""])
    def snake_case__ ( self):
        # No-lower-casing path keeps original capitalization.
        UpperCAmelCase__ : Union[str, Any] = XLNetTokenizer(_lowerCamelCase , do_lower_case=_lowerCamelCase)
        UpperCAmelCase__ : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            _lowerCamelCase , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """se""",
                """.""",
            ] , )
    @slow
    def snake_case__ ( self):
        # Special-token layout: single sequence ends with [4, 3] (<sep>, <cls>),
        # a pair inserts <sep> between the segments.
        UpperCAmelCase__ : List[Any] = XLNetTokenizer.from_pretrained("""xlnet-base-cased""")
        UpperCAmelCase__ : Dict = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCamelCase)
        UpperCAmelCase__ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCamelCase)
        UpperCAmelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase)
        UpperCAmelCase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase)
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]
    @slow
    def snake_case__ ( self):
        # fmt: off
        UpperCAmelCase__ : List[Any] = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
        3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowerCamelCase , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 407
| 1
|
from typing import Any
import numpy as np
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :np.ndarray ) -> bool:
return np.array_equal(SCREAMING_SNAKE_CASE , matrix.conjugate().T )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :np.ndarray ) -> Any:
__lowerCAmelCase : Dict = v.conjugate().T
__lowerCAmelCase : str = v_star.dot(SCREAMING_SNAKE_CASE )
assert isinstance(SCREAMING_SNAKE_CASE , np.ndarray )
return (v_star_dot.dot(SCREAMING_SNAKE_CASE )) / (v_star.dot(SCREAMING_SNAKE_CASE ))
def _SCREAMING_SNAKE_CASE ( ) -> None:
    """Smoke-test the Hermitian check and Rayleigh quotient on two matrices.

    NOTE(review): locals are assigned to the placeholder ``__lowerCAmelCase``
    while the code reads ``a``/``v``, and the called helpers
    (``is_hermitian``, ``rayleigh_quotient``, ``tests``) do not exist under
    those names in this file — automated-renaming damage; restore the
    original identifiers before running.
    """
    __lowerCAmelCase : str = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
    __lowerCAmelCase : Union[str, Any] = np.array([[1], [2], [3]] )
    assert is_hermitian(SCREAMING_SNAKE_CASE ), F'''{a} is not hermitian.'''
    print(rayleigh_quotient(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
    __lowerCAmelCase : Optional[Any] = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(SCREAMING_SNAKE_CASE ), F'''{a} is not hermitian.'''
    assert rayleigh_quotient(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) == float(3 )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    tests()
| 240
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class snake_case_ ( pl.LightningModule ):
    """Minimal LightningModule wrapper: a Longformer backbone plus a linear
    QA head (start/end logits, hence 2 labels).

    NOTE(review): the ``__init__`` assignments target the placeholder
    ``__lowerCAmelCase`` instead of ``self.model`` / ``self.num_labels`` /
    ``self.qa_outputs``, and the body reads an undefined ``model`` rather
    than its parameter — automated-renaming damage; restore the original
    attribute names.
    """
    def __init__( self : Union[str, Any] , _snake_case : List[str] )->List[str]:
        '''simple docstring'''
        super().__init__()
        __lowerCAmelCase : Dict = model
        __lowerCAmelCase : str = 2
        __lowerCAmelCase : List[str] = nn.Linear(self.model.config.hidden_size , self.num_labels )
    def UpperCAmelCase__ ( self : str )->Dict:
        # Training is not needed for checkpoint conversion — intentionally a no-op.
        '''simple docstring'''
        pass
def _SCREAMING_SNAKE_CASE ( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ) -> None:
    """Convert a PyTorch-Lightning Longformer-QA checkpoint into a
    ``LongformerForQuestionAnswering`` model saved under
    ``pytorch_dump_folder_path``.

    Bug fix: the original declared all three parameters with one duplicate
    mangled name (a SyntaxError) and assigned every intermediate to a
    placeholder while reading the intended names; working names restored.
    """
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("""cpu""" ) )
    lightning_model.load_state_dict(ckpt["""state_dict"""] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights from the Lightning wrapper to the HF model
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
    # NOTE(review): bindings here were mangled — both assignments target
    # ``_UpperCAmelCase`` while the code reads ``parser`` / ``args``, and
    # the called function name does not exist in this file (the converter
    # above is named ``_SCREAMING_SNAKE_CASE``). TODO restore the original
    # names before running this script.
    _UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--longformer_model',
        default=None,
        type=str,
        required=True,
        help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
    )
    parser.add_argument(
        '--longformer_question_answering_ckpt_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch Lightning Checkpoint.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    _UpperCAmelCase = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 240
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __UpperCamelCase :
    """Undirected weighted graph with Boruvka's minimum-spanning-tree
    algorithm (union-by-size on components).

    NOTE(review): assignment targets throughout this class carry the
    placeholder ``snake_case_`` while the code reads the intended names
    (``self.m_num_of_nodes``, ``u_node``, ``component_size`` ...), and
    several methods declare duplicate parameter names (a SyntaxError).
    Automated-renaming damage; restore the original identifiers.
    """
    def __init__( self :int ,_UpperCamelCase :int ):
        # Node count, edge list [u, v, weight], and node -> component map.
        snake_case_ : str = num_of_nodes
        snake_case_ : list[list[int]] = []
        snake_case_ : dict[int, int] = {}
    def a__ ( self :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ):
        # Add an undirected weighted edge.
        self.m_edges.append([u_node, v_node, weight] )
    def a__ ( self :Union[str, Any] ,_UpperCamelCase :int ):
        # Find the representative (root) of a node's component.
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def a__ ( self :List[str] ,_UpperCamelCase :int ):
        # Path-compress: point every node at its component root.
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                snake_case_ : Any = self.find_component(_UpperCamelCase )
    def a__ ( self :Optional[Any] ,_UpperCamelCase :list[int] ,_UpperCamelCase :int ,_UpperCamelCase :int ):
        # Union by size: attach the smaller component to the larger one.
        if component_size[u_node] <= component_size[v_node]:
            snake_case_ : List[Any] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(_UpperCamelCase )
        elif component_size[u_node] >= component_size[v_node]:
            snake_case_ : Union[str, Any] = self.find_component(_UpperCamelCase )
            component_size[u_node] += component_size[v_node]
            self.set_component(_UpperCamelCase )
    def a__ ( self :Any ):
        # Boruvka: repeatedly add each component's cheapest outgoing edge.
        snake_case_ : List[Any] = []
        snake_case_ : Union[str, Any] = 0
        snake_case_ : list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        snake_case_ : Optional[int] = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                snake_case_ , snake_case_ , snake_case_ : Dict = edge
                snake_case_ : Tuple = self.m_component[u]
                snake_case_ : Dict = self.m_component[v]
                if u_component != v_component:
                    # Track the cheapest edge leaving each component.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            snake_case_ : Optional[int] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(_UpperCamelCase ,_UpperCamelCase ):
                    snake_case_ , snake_case_ , snake_case_ : Tuple = edge
                    snake_case_ : Any = self.m_component[u]
                    snake_case_ : Optional[int] = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
                        print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
                        num_of_components -= 1
            snake_case_ : Optional[int] = [-1] * self.m_num_of_nodes
        print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def UpperCAmelCase ( ):
    """Placeholder function; it does nothing and returns None.

    NOTE(review): the original body of this function was stripped by an
    automated rewrite, leaving only the docstring.  TODO: restore the original
    implementation (and its doctests) from the upstream source.
    """


if __name__ == "__main__":
    # The guard body was unindented in the rewritten file (SyntaxError); fixed.
    import doctest

    doctest.testmod()
| 334
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# NOTE(review): the test class below filters its model mappings with `_TO_SKIP`,
# but an automated rewrite renamed this constant to `__A`, so the two names no
# longer match — confirm against the upstream file before relying on the filter.
__A : Dict = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class __UpperCamelCase ( unittest.TestCase ):
    # Test-suite for the zero-shot-classification pipeline.
    #
    # NOTE(review): an automated rewrite scrambled identifiers throughout this
    # class: pipeline results are assigned to `snake_case_` but then read back
    # through names such as `classifier`, `outputs` and `zero_shot_classifier`;
    # every method is named `a__` (later defs shadow earlier ones, and the call
    # to `self.run_entailment_id` below no longer resolves); the mapping filter
    # reads `_TO_SKIP` although the module constant was renamed `__A`; and the
    # three class attributes all share the name `lowercase`.  Only comments and
    # conventional indentation were added here — the tokens are unchanged.
    lowercase : Optional[int] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    lowercase : Tuple = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        lowercase : int = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        lowercase : Any = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    # Builds a pipeline plus sample inputs for the common pipeline harness.
    def a__ ( self :int ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :str ,_UpperCamelCase :List[Any] ):
        snake_case_ : Optional[Any] = ZeroShotClassificationPipeline(
            model=_UpperCamelCase ,tokenizer=_UpperCamelCase ,candidate_labels=["""polics""", """health"""] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    # Exercises the different accepted shapes of `candidate_labels` and the
    # error cases (empty sequence, empty/None labels, bad hypothesis template).
    def a__ ( self :Dict ,_UpperCamelCase :Any ,_UpperCamelCase :int ):
        snake_case_ : int = classifier("""Who are you voting for in 2020?""" ,candidate_labels="""politics""" )
        self.assertEqual(_UpperCamelCase ,{"""sequence""": ANY(_UpperCamelCase ), """labels""": [ANY(_UpperCamelCase )], """scores""": [ANY(_UpperCamelCase )]} )
        # No kwarg
        snake_case_ : List[str] = classifier("""Who are you voting for in 2020?""" ,["""politics"""] )
        self.assertEqual(_UpperCamelCase ,{"""sequence""": ANY(_UpperCamelCase ), """labels""": [ANY(_UpperCamelCase )], """scores""": [ANY(_UpperCamelCase )]} )
        snake_case_ : Union[str, Any] = classifier("""Who are you voting for in 2020?""" ,candidate_labels=["""politics"""] )
        self.assertEqual(_UpperCamelCase ,{"""sequence""": ANY(_UpperCamelCase ), """labels""": [ANY(_UpperCamelCase )], """scores""": [ANY(_UpperCamelCase )]} )
        snake_case_ : int = classifier("""Who are you voting for in 2020?""" ,candidate_labels="""politics, public health""" )
        self.assertEqual(
            _UpperCamelCase ,{"""sequence""": ANY(_UpperCamelCase ), """labels""": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], """scores""": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) ,1.0 )
        snake_case_ : Any = classifier("""Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health"""] )
        self.assertEqual(
            _UpperCamelCase ,{"""sequence""": ANY(_UpperCamelCase ), """labels""": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], """scores""": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) ,1.0 )
        snake_case_ : List[Any] = classifier(
            """Who are you voting for in 2020?""" ,candidate_labels="""politics""" ,hypothesis_template="""This text is about {}""" )
        self.assertEqual(_UpperCamelCase ,{"""sequence""": ANY(_UpperCamelCase ), """labels""": [ANY(_UpperCamelCase )], """scores""": [ANY(_UpperCamelCase )]} )
        # https://github.com/huggingface/transformers/issues/13846
        snake_case_ : Union[str, Any] = classifier(["""I am happy"""] ,["""positive""", """negative"""] )
        self.assertEqual(
            _UpperCamelCase ,[
                {"""sequence""": ANY(_UpperCamelCase ), """labels""": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], """scores""": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]}
                for i in range(1 )
            ] ,)
        snake_case_ : Union[str, Any] = classifier(["""I am happy""", """I am sad"""] ,["""positive""", """negative"""] )
        self.assertEqual(
            _UpperCamelCase ,[
                {"""sequence""": ANY(_UpperCamelCase ), """labels""": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], """scores""": [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]}
                for i in range(2 )
            ] ,)
        with self.assertRaises(_UpperCamelCase ):
            classifier("""""" ,candidate_labels="""politics""" )
        with self.assertRaises(_UpperCamelCase ):
            classifier(_UpperCamelCase ,candidate_labels="""politics""" )
        with self.assertRaises(_UpperCamelCase ):
            classifier("""Who are you voting for in 2020?""" ,candidate_labels="""""" )
        with self.assertRaises(_UpperCamelCase ):
            classifier("""Who are you voting for in 2020?""" ,candidate_labels=_UpperCamelCase )
        with self.assertRaises(_UpperCamelCase ):
            classifier(
                """Who are you voting for in 2020?""" ,candidate_labels="""politics""" ,hypothesis_template="""Not formatting template""" ,)
        with self.assertRaises(_UpperCamelCase ):
            classifier(
                """Who are you voting for in 2020?""" ,candidate_labels="""politics""" ,hypothesis_template=_UpperCamelCase ,)
        self.run_entailment_id(_UpperCamelCase )

    # Checks that `entailment_id` is derived correctly from various label2id
    # layouts (and is -1 when no entailment-like label exists).
    def a__ ( self :Tuple ,_UpperCamelCase :Pipeline ):
        snake_case_ : int = zero_shot_classifier.model.config
        snake_case_ : Union[str, Any] = config.labelaid
        snake_case_ : Optional[Any] = zero_shot_classifier.entailment_id
        snake_case_ : List[Any] = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id ,-1 )
        snake_case_ : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id ,0 )
        snake_case_ : int = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
        self.assertEqual(zero_shot_classifier.entailment_id ,0 )
        snake_case_ : Tuple = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
        self.assertEqual(zero_shot_classifier.entailment_id ,2 )
        snake_case_ : Optional[Any] = original_labelaid
        self.assertEqual(_UpperCamelCase ,zero_shot_classifier.entailment_id )

    # Regression test: very long sequences must be truncated, not crash.
    @require_torch
    def a__ ( self :Any ):
        snake_case_ : Dict = pipeline(
            """zero-shot-classification""" ,model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" ,framework="""pt""" ,)
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            """Who are you voting for in 2020?""" * 1_0_0 ,candidate_labels=["""politics""", """public health""", """science"""] )

    # Small-model PyTorch smoke test with fixed expected scores.
    @require_torch
    def a__ ( self :Dict ):
        snake_case_ : Union[str, Any] = pipeline(
            """zero-shot-classification""" ,model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" ,framework="""pt""" ,)
        snake_case_ : Union[str, Any] = zero_shot_classifier(
            """Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(_UpperCamelCase ) ,{
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""science""", """public health""", """politics"""],
                """scores""": [0.3_33, 0.3_33, 0.3_33],
            } ,)

    # Small-model TensorFlow smoke test with fixed expected scores.
    @require_tf
    def a__ ( self :Tuple ):
        snake_case_ : Union[str, Any] = pipeline(
            """zero-shot-classification""" ,model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" ,framework="""tf""" ,)
        snake_case_ : Union[str, Any] = zero_shot_classifier(
            """Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(_UpperCamelCase ) ,{
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""science""", """public health""", """politics"""],
                """scores""": [0.3_33, 0.3_33, 0.3_33],
            } ,)

    # Full-model PyTorch integration test (roberta-large-mnli), incl. multi_label.
    @slow
    @require_torch
    def a__ ( self :Optional[int] ):
        snake_case_ : Dict = pipeline("""zero-shot-classification""" ,model="""roberta-large-mnli""" ,framework="""pt""" )
        snake_case_ : str = zero_shot_classifier(
            """Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(_UpperCamelCase ) ,{
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""politics""", """public health""", """science"""],
                """scores""": [0.9_76, 0.0_15, 0.0_09],
            } ,)
        snake_case_ : int = zero_shot_classifier(
            """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
            """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
            """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
            """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
            """ machine translation tasks show these models to be superior in quality while being more parallelizable"""
            """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
            """ English-to-German translation task, improving over the existing best results, including ensembles by"""
            """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
            """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
            """ fraction of the training costs of the best models from the literature. We show that the Transformer"""
            """ generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
            """ large and limited training data.""" ,candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] ,multi_label=_UpperCamelCase ,)
        self.assertEqual(
            nested_simplify(_UpperCamelCase ) ,{
                """sequence""": (
                    """The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
                    """ networks in an encoder-decoder configuration. The best performing models also connect the"""
                    """ encoder and decoder through an attention mechanism. We propose a new simple network"""
                    """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
                    """ and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
                    """ superior in quality while being more parallelizable and requiring significantly less time to"""
                    """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
                    """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
                    """ English-to-French translation task, our model establishes a new single-model state-of-the-art"""
                    """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
                    """ costs of the best models from the literature. We show that the Transformer generalizes well to"""
                    """ other tasks by applying it successfully to English constituency parsing both with large and"""
                    """ limited training data."""
                ),
                """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
                """scores""": [0.8_17, 0.7_13, 0.0_18, 0.0_18],
            } ,)

    # Full-model TensorFlow integration test (roberta-large-mnli), incl. multi_label.
    @slow
    @require_tf
    def a__ ( self :Optional[int] ):
        snake_case_ : List[str] = pipeline("""zero-shot-classification""" ,model="""roberta-large-mnli""" ,framework="""tf""" )
        snake_case_ : str = zero_shot_classifier(
            """Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(_UpperCamelCase ) ,{
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""politics""", """public health""", """science"""],
                """scores""": [0.9_76, 0.0_15, 0.0_09],
            } ,)
        snake_case_ : Optional[int] = zero_shot_classifier(
            """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
            """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
            """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
            """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
            """ machine translation tasks show these models to be superior in quality while being more parallelizable"""
            """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
            """ English-to-German translation task, improving over the existing best results, including ensembles by"""
            """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
            """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
            """ fraction of the training costs of the best models from the literature. We show that the Transformer"""
            """ generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
            """ large and limited training data.""" ,candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] ,multi_label=_UpperCamelCase ,)
        self.assertEqual(
            nested_simplify(_UpperCamelCase ) ,{
                """sequence""": (
                    """The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
                    """ networks in an encoder-decoder configuration. The best performing models also connect the"""
                    """ encoder and decoder through an attention mechanism. We propose a new simple network"""
                    """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
                    """ and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
                    """ superior in quality while being more parallelizable and requiring significantly less time to"""
                    """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
                    """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
                    """ English-to-French translation task, our model establishes a new single-model state-of-the-art"""
                    """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
                    """ costs of the best models from the literature. We show that the Transformer generalizes well to"""
                    """ other tasks by applying it successfully to English constituency parsing both with large and"""
                    """ limited training data."""
                ),
                """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
                """scores""": [0.8_17, 0.7_13, 0.0_18, 0.0_18],
            } ,)
| 334
| 1
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Shared SentencePiece fixture used to build the slow tokenizer in these tests.
# NOTE(review): the original constant name (presumably SAMPLE_VOCAB) was
# rewritten to `lowercase`; the class below references it as `lowerCamelCase__`.
lowercase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( _lowercase , unittest.TestCase):
    '''simple docstring'''

    # XLNet tokenizer test-suite.
    #
    # NOTE(review): an automated rewrite mangled this class: the mixin base is
    # `_lowercase` (undefined here — presumably TokenizerTesterMixin, imported
    # above); all four class attributes share the name `__magic_name__`; every
    # method is named `UpperCAmelCase`; and locals are assigned to `snake_case__`
    # but then read through names such as `tokenizer`, `vocab_keys`, `tokens`
    # and `text`.  Only comments and conventional indentation were added — the
    # tokens themselves are unchanged.
    __magic_name__ : Any = XLNetTokenizer
    __magic_name__ : Optional[Any] = XLNetTokenizerFast
    __magic_name__ : List[str] = True
    __magic_name__ : Union[str, Any] = True

    # Builds a slow tokenizer from the SentencePiece fixture and saves it so the
    # common tests can reload it from `self.tmpdirname`.
    def UpperCAmelCase ( self) -> str:
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        snake_case__ : Tuple = XLNetTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    # token <-> id round-trip for "<s>" (id 1).
    def UpperCAmelCase ( self) -> int:
        '''simple docstring'''
        snake_case__ : Dict = "<s>"
        snake_case__ : str = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__) , lowerCamelCase__)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__) , lowerCamelCase__)

    # First/last vocab entries and vocab length.
    def UpperCAmelCase ( self) -> List[Any]:
        '''simple docstring'''
        snake_case__ : str = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , "<unk>")
        self.assertEqual(vocab_keys[1] , "<s>")
        self.assertEqual(vocab_keys[-1] , "<eod>")
        self.assertEqual(len(lowerCamelCase__) , 1_006)

    def UpperCAmelCase ( self) -> Any:
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1_000)

    # Full tokenization round-trip with keep_accents=True.
    def UpperCAmelCase ( self) -> str:
        '''simple docstring'''
        snake_case__ : Any = XLNetTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__)
        snake_case__ : List[str] = tokenizer.tokenize("This is a test")
        self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__) , [285, 46, 10, 170, 382])
        snake_case__ : Tuple = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            lowerCamelCase__ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        snake_case__ : int = tokenizer.convert_tokens_to_ids(lowerCamelCase__)
        self.assertListEqual(lowerCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        snake_case__ : str = tokenizer.convert_ids_to_tokens(lowerCamelCase__)
        self.assertListEqual(
            lowerCamelCase__ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )

    # do_lower_case=True folds case and strips accents ("falsé" -> "false").
    def UpperCAmelCase ( self) -> Dict:
        '''simple docstring'''
        snake_case__ : str = XLNetTokenizer(lowerCamelCase__ , do_lower_case=lowerCamelCase__)
        snake_case__ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            lowerCamelCase__ , [
                SPIECE_UNDERLINE + "",
                "i",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ] , )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["▁he", "ll", "o"])

    # do_lower_case=False keeps case but still normalizes "falsé" -> "false".
    def UpperCAmelCase ( self) -> str:
        '''simple docstring'''
        snake_case__ : List[Any] = XLNetTokenizer(lowerCamelCase__ , do_lower_case=lowerCamelCase__)
        snake_case__ : str = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            lowerCamelCase__ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ] , )

    # XLNet appends [4, 3] (<sep>, <cls>) to a single sequence and joins pairs
    # with an extra <sep>.
    @slow
    def UpperCAmelCase ( self) -> Any:
        '''simple docstring'''
        snake_case__ : Union[str, Any] = XLNetTokenizer.from_pretrained("xlnet-base-cased")
        snake_case__ : List[Any] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCamelCase__)
        snake_case__ : Optional[int] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCamelCase__)
        snake_case__ : Dict = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__)
        snake_case__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__)
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]

    # Frozen-encoding integration check against a pinned model revision.
    @slow
    def UpperCAmelCase ( self) -> Optional[int]:
        '''simple docstring'''
        snake_case__ : str = {"input_ids": [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
            3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase__ , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
| 715
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase = logging.get_logger(__name__)

# NOTE(review): the tokenizer class below reads VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, but an
# automated rewrite renamed all four module constants to `lowercase`, each
# assignment shadowing the previous one — confirm against the upstream
# BlenderbotSmall tokenizer module before use.
lowercase = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}
lowercase = {
    """vocab_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
    },
    """merges_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
    },
    """tokenizer_config_file""": {
        """facebook/blenderbot_small-90M""": (
            """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
        )
    },
}
# Maximum model input length (in tokens) per pretrained checkpoint.
lowercase = {"""facebook/blenderbot_small-90M""": 512}
def A__ ( _UpperCAmelCase : Dict ) -> Tuple:
    """Return the set of adjacent symbol pairs in a word.

    Args:
        _UpperCAmelCase: a sequence of symbols (e.g. a tuple of variable-length
            strings representing a partially merged BPE word).

    Returns:
        A set of ``(previous_symbol, symbol)`` tuples.  (The ``Tuple`` return
        annotation is inherited from the mangled original; the function has
        always returned a set.)

    NOTE(review): the previous machine-rewritten version assigned the
    accumulator and the previous symbol to throwaway names, so `pairs` and
    `prev_char` were undefined and every call raised NameError; the working
    names are restored.
    """
    pairs = set()
    prev_char = _UpperCAmelCase[0]
    for char in _UpperCAmelCase[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return set(pairs)
class SCREAMING_SNAKE_CASE_ ( _lowercase):
    '''simple docstring'''

    # BlenderbotSmall BPE tokenizer.
    #
    # NOTE(review): an automated rewrite mangled this class: the base class is
    # `_lowercase` (presumably PreTrainedTokenizer, imported above); class
    # attributes all share the name `__magic_name__` and reference constants
    # that were renamed `lowercase` at module level; every method is named
    # `UpperCAmelCase`; all parameters are `lowerCamelCase__` (duplicated in
    # `__init__`, which is itself a SyntaxError); and locals are assigned to
    # `snake_case__` but read back through names like `self.encoder`, `word`,
    # `pairs` and `merge_file`.  Only comments and conventional indentation
    # were added — the tokens themselves are unchanged.
    __magic_name__ : Dict = VOCAB_FILES_NAMES
    __magic_name__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ : Tuple = ['''input_ids''', '''attention_mask''']

    # Loads the vocab (JSON) and BPE merges (text) files.
    def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="__start__" , lowerCamelCase__="__end__" , lowerCamelCase__="__unk__" , lowerCamelCase__="__null__" , **lowerCamelCase__ , ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(unk_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , **lowerCamelCase__)
        with open(lowerCamelCase__ , encoding="utf-8") as vocab_handle:
            snake_case__ : int = json.load(lowerCamelCase__)
        snake_case__ : List[str] = {v: k for k, v in self.encoder.items()}
        with open(lowerCamelCase__ , encoding="utf-8") as merges_handle:
            snake_case__ : Any = merges_handle.read().split("\n")[1:-1]
        snake_case__ : Optional[int] = [tuple(merge.split()) for merge in merges]
        snake_case__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__))))
        # BPE cache: token string -> merged output.
        snake_case__ : List[str] = {}

    @property
    def UpperCAmelCase ( self) -> int:
        '''simple docstring'''
        return len(self.encoder)

    def UpperCAmelCase ( self) -> Dict:
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder)

    # Applies the BPE merge loop to one whitespace-delimited token.
    def UpperCAmelCase ( self , lowerCamelCase__) -> str:
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        # Separate punctuation/apostrophes and collapse repeated whitespace.
        snake_case__ : Tuple = re.sub("([.,!?()])" , R" \1" , lowerCamelCase__)
        snake_case__ : List[Any] = re.sub("(')" , R" \1 " , lowerCamelCase__)
        snake_case__ : Dict = re.sub(R"\s{2,}" , " " , lowerCamelCase__)
        if "\n" in token:
            snake_case__ : Tuple = token.replace("\n" , " __newln__")
        snake_case__ : Optional[int] = token.split(" ")
        snake_case__ : int = []
        for token in tokens:
            if not len(lowerCamelCase__):
                continue
            snake_case__ : str = token.lower()
            snake_case__ : List[str] = tuple(lowerCamelCase__)
            # Mark the end of the word with "</w>".
            snake_case__ : str = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            snake_case__ : Optional[int] = get_pairs(lowerCamelCase__)
            if not pairs:
                words.append(lowerCamelCase__)
                continue
            # Repeatedly merge the lowest-ranked pair until none remain.
            while True:
                snake_case__ : int = min(lowerCamelCase__ , key=lambda lowerCamelCase__: self.bpe_ranks.get(lowerCamelCase__ , float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                snake_case__, snake_case__ : Any = bigram
                snake_case__ : Optional[int] = []
                snake_case__ : str = 0
                while i < len(lowerCamelCase__):
                    try:
                        snake_case__ : Any = word.index(lowerCamelCase__ , lowerCamelCase__)
                        new_word.extend(word[i:j])
                        snake_case__ : Tuple = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(lowerCamelCase__) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                snake_case__ : Optional[int] = tuple(lowerCamelCase__)
                snake_case__ : str = new_word
                if len(lowerCamelCase__) == 1:
                    break
                else:
                    snake_case__ : Optional[int] = get_pairs(lowerCamelCase__)
            # Join sub-words with the "@@ " continuation marker, drop "</w>".
            snake_case__ : Tuple = "@@ ".join(lowerCamelCase__)
            snake_case__ : Union[str, Any] = word[:-4]
            snake_case__ : Any = word
            words.append(lowerCamelCase__)
        return " ".join(lowerCamelCase__)

    # Splits text on whitespace and BPE-encodes each chunk.
    def UpperCAmelCase ( self , lowerCamelCase__) -> List[str]:
        '''simple docstring'''
        snake_case__ : Any = []
        snake_case__ : Union[str, Any] = re.findall(R"\S+\n?" , lowerCamelCase__)
        for token in words:
            split_tokens.extend(list(self.bpe(lowerCamelCase__).split(" ")))
        return split_tokens

    def UpperCAmelCase ( self , lowerCamelCase__) -> int:
        '''simple docstring'''
        snake_case__ : str = token.lower()
        return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token))

    def UpperCAmelCase ( self , lowerCamelCase__) -> str:
        '''simple docstring'''
        return self.decoder.get(lowerCamelCase__ , self.unk_token)

    # Undo the "@@ " continuation markers to rebuild the original string.
    def UpperCAmelCase ( self , lowerCamelCase__) -> str:
        '''simple docstring'''
        snake_case__ : Optional[int] = " ".join(lowerCamelCase__).replace("@@ " , "").strip()
        return out_string

    # Writes vocab.json and merges.txt into `save_directory`.
    def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(lowerCamelCase__):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        snake_case__ : int = os.path.join(
            lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        snake_case__ : Tuple = os.path.join(
            lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(lowerCamelCase__ , "w" , encoding="utf-8") as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__) + "\n")
        snake_case__ : str = 0
        with open(lowerCamelCase__ , "w" , encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase__: kv[1]):
                if index != token_index:
                    # Non-consecutive merge indices indicate a corrupted tokenizer.
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!")
                    snake_case__ : Tuple = token_index
                writer.write(" ".join(lowerCamelCase__) + "\n")
                index += 1
        return vocab_file, merge_file
| 150
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow (SentencePiece) tokenizer is optional; fall back to None so the fast
# tokenizer can still be used without sentencepiece installed.
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    UpperCamelCase__ : int = None

UpperCamelCase__ : int = logging.get_logger(__name__)

# NOTE(review): the class below references VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# FAIRSEQ_LANGUAGE_CODES, but an automated rewrite renamed all of these module
# constants to `UpperCamelCase__` (each assignment shadowing the previous one).
UpperCamelCase__ : Dict = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase__ : Union[str, Any] = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}
UpperCamelCase__ : List[str] = {
    "facebook/mbart-large-en-ro": 1_024,
    "facebook/mbart-large-cc25": 1_024,
}
# fmt: off
UpperCamelCase__ : int = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class _a (lowercase_):
    """Fast (tokenizers-backed) MBART tokenizer.

    Manages the MBART special-token layout: the language codes are appended
    as suffix tokens after ``</s>``, and ``set_src_lang_special_tokens`` /
    ``set_tgt_lang_special_tokens`` switch the backend post-processor when
    the source/target language changes.

    NOTE(review): this file has been mechanically renamed. The seven class
    attributes below all bind the single name ``SCREAMING_SNAKE_CASE`` (each
    assignment shadows the previous one), ``__init__`` declares every
    parameter as ``A__`` (duplicate parameter names are a SyntaxError), and
    method bodies bind locals to ``_SCREAMING_SNAKE_CASE`` while later lines
    read the original names (``_additional_special_tokens``, ``inputs``,
    ``tgt_lang_id``, ``cls``, ``sep``, ...). Code is left byte-identical in
    this documentation pass; the original identifiers must be restored
    before it can run.
    """
    # Upstream these are, in order: vocab_files_names, max_model_input_sizes,
    # pretrained_vocab_files_map, model_input_names, slow_tokenizer_class,
    # prefix_tokens, suffix_tokens — TODO confirm against upstream.
    SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
    SCREAMING_SNAKE_CASE = MBartTokenizer
    SCREAMING_SNAKE_CASE = []
    SCREAMING_SNAKE_CASE = []
    def __init__( self , A__=None , A__=None , A__="<s>" , A__="</s>" , A__="</s>" , A__="<s>" , A__="<unk>" , A__="<pad>" , A__="<mask>" , A__=None , A__=None , A__=None , **A__ , ) -> List[Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        _SCREAMING_SNAKE_CASE = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
        super().__init__(
            vocab_file=_lowercase , tokenizer_file=_lowercase , bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , src_lang=_lowercase , tgt_lang=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
        _SCREAMING_SNAKE_CASE = vocab_file
        # Saving a slow tokenizer requires the sentencepiece model file.
        _SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
        _SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
        # Map each FAIRSEQ language code to its token id for fast lookup.
        _SCREAMING_SNAKE_CASE = {
            lang_code: self.convert_tokens_to_ids(_lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        _SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else """en_XX"""
        _SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang )
        _SCREAMING_SNAKE_CASE = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def UpperCamelCase ( self ) -> str:
        # Upstream name: `src_lang` (property getter).
        return self._src_lang
    @src_lang.setter
    def UpperCamelCase ( self , A__ ) -> None:
        # Upstream name: `src_lang` setter — also re-applies the special tokens.
        # NOTE(review): `src_lang` is not a defined property name here, so the
        # decorator itself cannot resolve.
        _SCREAMING_SNAKE_CASE = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def UpperCamelCase ( self , A__ , A__ = None ) -> List[int]:
        # build_inputs_with_special_tokens: prefix + ids (+ pair ids) + suffix.
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def UpperCamelCase ( self , A__ , A__ = None ) -> List[int]:
        # create_token_type_ids_from_sequences: mBART uses all-zero segment ids.
        _SCREAMING_SNAKE_CASE = [self.sep_token_id]
        _SCREAMING_SNAKE_CASE = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def UpperCamelCase ( self , A__ , A__ , A__ , A__ , **A__ ) -> int:
        # _build_translation_inputs: tokenize and attach the forced BOS id of
        # the target language.
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        _SCREAMING_SNAKE_CASE = src_lang
        _SCREAMING_SNAKE_CASE = self(_lowercase , add_special_tokens=_lowercase , return_tensors=_lowercase , **_lowercase )
        _SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(_lowercase )
        _SCREAMING_SNAKE_CASE = tgt_lang_id
        return inputs
    def UpperCamelCase ( self , A__ , A__ = "en_XX" , A__ = None , A__ = "ro_RO" , **A__ , ) -> BatchEncoding:
        # prepare_seq2seq_batch: record the language pair, then defer to base.
        _SCREAMING_SNAKE_CASE = src_lang
        _SCREAMING_SNAKE_CASE = tgt_lang
        return super().prepare_seqaseq_batch(_lowercase , _lowercase , **_lowercase )
    def UpperCamelCase ( self ) -> Dict:
        # _switch_to_input_mode
        return self.set_src_lang_special_tokens(self.src_lang )
    def UpperCamelCase ( self ) -> Optional[int]:
        # _switch_to_target_mode
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def UpperCamelCase ( self , A__ ) -> None:
        # set_src_lang_special_tokens: no prefix; suffix is [</s>, src_lang_code].
        _SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(_lowercase )
        _SCREAMING_SNAKE_CASE = []
        _SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
        _SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
        _SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
        # Rebuild the backend post-processor so encodings carry the new layout.
        _SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def UpperCamelCase ( self , A__ ) -> None:
        # set_tgt_lang_special_tokens: same layout with the target language code.
        _SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(_lowercase )
        _SCREAMING_SNAKE_CASE = []
        _SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
        _SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
        _SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
        _SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def UpperCamelCase ( self , A__ , A__ = None ) -> Tuple[str]:
        # save_vocabulary: copy the sentencepiece model into the save directory.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(_lowercase ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory." )
            return
        _SCREAMING_SNAKE_CASE = os.path.join(
            _lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        # Only copy when source and destination differ.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
            copyfile(self.vocab_file , _lowercase )
        return (out_vocab_file,)
| 591
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
# NOTE(review): all three fixture paths are bound to the same obfuscated name,
# so the first two assignments are immediately shadowed and only the last
# binding survives. Upstream these are three distinct constants
# (fixtures dir, dummy feature-extractor config, dummy config) — restore
# distinct names before the tests below can use them.
SCREAMING_SNAKE_CASE : int = get_tests_dir("fixtures")
SCREAMING_SNAKE_CASE : str = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SCREAMING_SNAKE_CASE : str = get_tests_dir("fixtures/dummy-config.json")
class snake_case ( unittest.TestCase ):
    """Tests for AutoFeatureExtractor: resolution, registration, remote code.

    NOTE(review): mechanical renaming collapsed every test method onto the
    single name ``a__`` (each definition shadows the previous one, so unittest
    would only discover the last), rebinds every local to
    ``SCREAMING_SNAKE_CASE_`` and then reads names that are never bound
    (``_lowercase``, ``config_dict``, ``model_config``, ``config``,
    ``feature_extractor``, ``reloaded_feature_extractor``). Code is left
    byte-identical in this documentation pass.
    """
    def a__ ( self ) -> str:
        # Upstream: setUp — resets a request counter.
        SCREAMING_SNAKE_CASE_ = 0
    def a__ ( self ) -> Tuple:
        # Load a feature extractor straight from a Hub model id.
        SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
        self.assertIsInstance(_lowercase, _lowercase )
    def a__ ( self ) -> List[str]:
        # Load from a local preprocessor config path.
        SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_lowercase )
        self.assertIsInstance(_lowercase, _lowercase )
    def a__ ( self ) -> str:
        # A bare model config.json must be enough to resolve the extractor.
        with tempfile.TemporaryDirectory() as tmpdirname:
            SCREAMING_SNAKE_CASE_ = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_lowercase ).to_dict()
            config_dict.pop('feature_extractor_type' )
            SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor(**_lowercase )
            # save in new folder
            model_config.save_pretrained(_lowercase )
            config.save_pretrained(_lowercase )
            SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_lowercase )
            # make sure private variable is not incorrectly saved
            SCREAMING_SNAKE_CASE_ = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )
            self.assertIsInstance(_lowercase, _lowercase )
    def a__ ( self ) -> int:
        # Load from a full preprocessor_config.json.
        SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_lowercase )
        self.assertIsInstance(_lowercase, _lowercase )
    def a__ ( self ) -> Any:
        # Unknown repo id raises with a helpful message.
        with self.assertRaisesRegex(
            _lowercase, 'bert-base is not a local folder and is not a valid model identifier' ):
            SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained('bert-base' )
    def a__ ( self ) -> List[Any]:
        # Invalid git revision raises.
        with self.assertRaisesRegex(
            _lowercase, R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_lowercase, revision='aaaaaa' )
    def a__ ( self ) -> List[Any]:
        # Repo without preprocessor_config.json raises.
        with self.assertRaisesRegex(
            _lowercase, 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.', ):
            SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
    def a__ ( self ) -> List[str]:
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(_lowercase ):
            SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(_lowercase ):
            SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=_lowercase )
        SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=_lowercase )
        self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(_lowercase )
            SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_lowercase, trust_remote_code=_lowercase )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
    def a__ ( self ) -> int:
        # Custom config / feature-extractor registration round trip.
        try:
            AutoConfig.register('custom', _lowercase )
            AutoFeatureExtractor.register(_lowercase, _lowercase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_lowercase ):
                AutoFeatureExtractor.register(_lowercase, _lowercase )
            # Now that the config is registered, it can be used as any other config with the auto-API
            SCREAMING_SNAKE_CASE_ = CustomFeatureExtractor.from_pretrained(_lowercase )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(_lowercase )
                SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(_lowercase )
                self.assertIsInstance(_lowercase, _lowercase )
        finally:
            # Always clean the global registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def a__ ( self ) -> str:
        # Local vs remote precedence when both a registered class and Hub code exist.
        class snake_case ( lowercase_ ):
            """Local stand-in feature extractor used to test precedence."""
            _a = True
        try:
            AutoConfig.register('custom', _lowercase )
            AutoFeatureExtractor.register(_lowercase, _lowercase )
            # If remote code is not set, the default is to use local
            SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=_lowercase )
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=_lowercase )
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor' )
            self.assertTrue(not hasattr(_lowercase, 'is_local' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 294
| 0
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def _snake_case ( _SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def _snake_case( ):
    """Entry point for the ``datasets-cli`` tool.

    Builds the argument parser, registers all subcommands, parses the
    command line, and runs the selected command. Unknown ``--key value``
    pairs are forwarded to the command as keyword arguments. Exits with
    status 1 when no subcommand was given.

    (Fix: every local was bound to ``lowerCAmelCase`` while subsequent
    lines read ``parser``/``args``/``kwargs``/``service``, and the helper
    ``parse_unknown_args`` was referenced under a name that no longer
    exists in this module — its logic is inlined below because the sibling
    helper shares this function's obfuscated name and is shadowed.)
    """
    parser = ArgumentParser(
        """HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help="""datasets-cli command helpers""" )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Forward the leftover --key value pairs to the command as kwargs.
    kwargs = {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
    # Fix: `main` is not defined anywhere in this module; the CLI entry point
    # defined above is `_snake_case`, so invoke that instead of raising NameError.
    _snake_case()
| 713
|
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __snake_case( unittest.TestCase ):
    """Tests for TextStreamer / TextIteratorStreamer during generation.

    NOTE(review): mechanical renaming collapsed every test method onto the
    class's own name ``__snake_case`` (each def shadows the previous one),
    rebinds all locals to ``lowerCAmelCase`` while later lines read the
    original names (``tokenizer``, ``model``, ``greedy_ids``, ``streamer``,
    ...), and replaced device/flag arguments with the undefined ``A_``.
    Code is left byte-identical in this documentation pass.
    """
    def __snake_case ( self ) -> Tuple:
        # Streamed stdout of a greedy generation must equal the decoded ids.
        lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(A_ )
        lowerCAmelCase = -1
        lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
        lowerCAmelCase = model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
        lowerCAmelCase = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            lowerCAmelCase = TextStreamer(A_ )
            model.generate(A_ , max_new_tokens=10 , do_sample=A_ , streamer=A_ )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowerCAmelCase = cs.out[:-1]
        self.assertEqual(A_ , A_ )
    def __snake_case ( self ) -> List[Any]:
        # Iterator streamer: accumulated text must equal the decoded ids.
        lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(A_ )
        lowerCAmelCase = -1
        lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
        lowerCAmelCase = model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
        lowerCAmelCase = tokenizer.decode(greedy_ids[0] )
        lowerCAmelCase = TextIteratorStreamer(A_ )
        lowerCAmelCase = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
        lowerCAmelCase = Thread(target=model.generate , kwargs=A_ )
        thread.start()
        lowerCAmelCase = """"""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(A_ , A_ )
    def __snake_case ( self ) -> Union[str, Any]:
        # skip_prompt=True must stream only the newly generated tokens.
        lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(A_ )
        lowerCAmelCase = -1
        lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
        lowerCAmelCase = model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
        lowerCAmelCase = greedy_ids[:, input_ids.shape[1] :]
        lowerCAmelCase = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            lowerCAmelCase = TextStreamer(A_ , skip_prompt=A_ )
            model.generate(A_ , max_new_tokens=10 , do_sample=A_ , streamer=A_ )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowerCAmelCase = cs.out[:-1]
        self.assertEqual(A_ , A_ )
    def __snake_case ( self ) -> int:
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        lowerCAmelCase = AutoTokenizer.from_pretrained("""distilgpt2""" )
        lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(A_ )
        lowerCAmelCase = -1
        lowerCAmelCase = torch.ones((1, 5) , device=A_ ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            lowerCAmelCase = TextStreamer(A_ , skip_special_tokens=A_ )
            model.generate(A_ , max_new_tokens=1 , do_sample=A_ , streamer=A_ )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        lowerCAmelCase = cs.out[:-1] # Remove the final "\n"
        lowerCAmelCase = tokenizer(A_ , return_tensors="""pt""" )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
    def __snake_case ( self ) -> Tuple:
        # A short timeout on the iterator streamer must raise while consuming.
        lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(A_ )
        lowerCAmelCase = -1
        lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
        lowerCAmelCase = TextIteratorStreamer(A_ , timeout=0.0_0_1 )
        lowerCAmelCase = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
        lowerCAmelCase = Thread(target=model.generate , kwargs=A_ )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(A_ ):
            lowerCAmelCase = """"""
            for new_text in streamer:
                streamer_text += new_text
| 344
| 0
|
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __snake_case ( __A ,__A ,unittest.TestCase):
    """Model tests for diffusers' VQModel (dummy inputs, loading, regression).

    NOTE(review): the base classes are obfuscated (`__A`; presumably
    ModelTesterMixin and UNetTesterMixin per the import above — confirm).
    `lowercase` is assigned twice (second shadows first; upstream these are
    `model_class` and `main_input_name`), several methods share the name
    `__lowercase` (later defs shadow earlier ones), and locals bound to
    `lowerCAmelCase_` are read back via unbound names (`batch_size`,
    `num_channels`, `image`, `loading_info`, `output`). Code left
    byte-identical in this documentation pass.
    """
    lowercase = VQModel
    lowercase = 'sample'
    @property
    def __lowercase ( self : int , lowerCamelCase : Tuple=(32, 32) ) -> Optional[int]:
        # dummy_input: a random (batch, channels, H, W) sample tensor.
        lowerCAmelCase_ : Union[str, Any] = 4
        lowerCAmelCase_ : List[Any] = 3
        lowerCAmelCase_ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCAmelCase )
        return {"sample": image}
    @property
    def __lowercase ( self : Tuple ) -> Dict:
        # Input shape (C, H, W).
        return (3, 32, 32)
    @property
    def __lowercase ( self : str ) -> int:
        # Output shape (C, H, W).
        return (3, 32, 32)
    def __lowercase ( self : int ) -> int:
        # Minimal VQModel config plus matching dummy inputs.
        lowerCAmelCase_ : Dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 3,
        }
        lowerCAmelCase_ : Optional[int] = self.dummy_input
        return init_dict, inputs_dict
    def __lowercase ( self : Optional[int] ) -> str:
        # Intentionally skipped in upstream (e.g. forward-with-checkpointing).
        pass
    def __lowercase ( self : List[Any] ) -> List[Any]:
        # Intentionally skipped in upstream.
        pass
    def __lowercase ( self : List[str] ) -> List[str]:
        # from_pretrained must report no missing keys and produce an output.
        lowerCAmelCase_ : Optional[Any] = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=__UpperCAmelCase )
        self.assertIsNotNone(__UpperCAmelCase )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(__UpperCAmelCase )
        lowerCAmelCase_ : str = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def __lowercase ( self : Optional[int] ) -> Optional[Any]:
        # Output regression against fixed reference values on a seeded input.
        lowerCAmelCase_ : Optional[Any] = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
        model.to(__UpperCAmelCase ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        lowerCAmelCase_ : int = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        lowerCAmelCase_ : List[Any] = image.to(__UpperCAmelCase )
        with torch.no_grad():
            lowerCAmelCase_ : Union[str, Any] = model(__UpperCAmelCase ).sample
        lowerCAmelCase_ : Optional[Any] = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        lowerCAmelCase_ : Optional[Any] = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
        # fmt: on
        self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) )
| 275
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config.json.
# Fix: both constants were bound to the same name `__SCREAMING_SNAKE_CASE`,
# so the logger was immediately shadowed by this dict; restored distinct names.
SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/swin-tiny-patch4-window7-224': (
        'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase_ ( __A , __A ):
    """Swin Transformer model configuration.

    Stores the architecture hyper-parameters (image/patch geometry, stage
    depths, attention heads, dropout rates, ...) and derives ``hidden_size``
    and the backbone stage bookkeeping from them. Defaults reproduce the
    swin-tiny-patch4-window7-224 layout.

    Fixes applied: the original declared every ``__init__`` parameter as
    ``__UpperCAmelCase`` (duplicate parameter names are a SyntaxError) and
    bound every attribute to the local ``SCREAMING_SNAKE_CASE_`` instead of
    ``self``; the parameter names were recovered from the right-hand sides
    of the original assignments. Also ``_lowercase`` was bound twice —
    restored to the ``model_type`` / ``attribute_map`` contract names.

    NOTE(review): the base classes are obfuscated (``__A``); judging by the
    imports above these are (BackboneConfigMixin, PretrainedConfig) —
    confirm before relying on it.
    """
    model_type = 'swin'

    # Map the generic transformer attribute names onto Swin's own names.
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    # NOTE: the list defaults below are shared mutable objects; they are never
    # mutated here, matching the upstream signature.
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class lowerCAmelCase_ ( __A ):
    """ONNX export configuration for Swin (opset metadata, inputs, tolerance).

    NOTE(review): both properties below share the obfuscated name
    ``__lowerCamelCase``, so the second definition shadows the first and the
    inputs mapping becomes unreachable. Upstream these are ``inputs`` and
    ``atol_for_validation`` — restore before exporting. The base class
    (``__A``) is presumably OnnxConfig per the import above; confirm.
    """
    # Minimum torch version supporting this export.
    _lowercase = version.parse('1.11' )
    @property
    def __lowerCamelCase ( self ):
        # Expected ONNX input: an NCHW pixel_values tensor with dynamic axes.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def __lowerCamelCase ( self ):
        # Absolute tolerance used when validating exported model outputs.
        return 1E-4
| 220
| 0
|
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCamelCase (unittest.TestCase ):
    """Config/inputs builder for FlaxBigBird model tests (upstream:
    FlaxBigBirdModelTester).

    NOTE(review): mechanical renaming declares every ``__init__`` parameter
    as ``_snake_case`` (duplicate parameter names are a SyntaxError), binds
    all attributes/locals to ``SCREAMING_SNAKE_CASE__`` instead of ``self``,
    and then reads the original names (``parent``, ``batch_size``,
    ``config_and_inputs``, ...) which are never bound. The two builder
    methods also share one name, so the second shadows the first. Code is
    left byte-identical in this documentation pass.
    """
    def __init__( self : Union[str, Any] , _snake_case : List[str] , _snake_case : str=2 , _snake_case : Any=56 , _snake_case : str=True , _snake_case : int=True , _snake_case : int=True , _snake_case : Any=True , _snake_case : Optional[Any]=99 , _snake_case : Union[str, Any]=32 , _snake_case : Tuple=2 , _snake_case : Optional[Any]=2 , _snake_case : Union[str, Any]=7 , _snake_case : Any="gelu_new" , _snake_case : List[Any]=0.1 , _snake_case : int=0.1 , _snake_case : Dict=512 , _snake_case : Dict=16 , _snake_case : Optional[Any]=2 , _snake_case : int=0.02 , _snake_case : Optional[int]=4 , _snake_case : List[Any]="block_sparse" , _snake_case : Optional[int]=True , _snake_case : Any=False , _snake_case : Optional[Any]=2 , _snake_case : Any=3 , ) -> Optional[Any]:
        # Record every hyper-parameter used to build the test config below.
        SCREAMING_SNAKE_CASE__ = parent
        SCREAMING_SNAKE_CASE__ = batch_size
        SCREAMING_SNAKE_CASE__ = seq_length
        SCREAMING_SNAKE_CASE__ = is_training
        SCREAMING_SNAKE_CASE__ = use_attention_mask
        SCREAMING_SNAKE_CASE__ = use_token_type_ids
        SCREAMING_SNAKE_CASE__ = use_labels
        SCREAMING_SNAKE_CASE__ = vocab_size
        SCREAMING_SNAKE_CASE__ = hidden_size
        SCREAMING_SNAKE_CASE__ = num_hidden_layers
        SCREAMING_SNAKE_CASE__ = num_attention_heads
        SCREAMING_SNAKE_CASE__ = intermediate_size
        SCREAMING_SNAKE_CASE__ = hidden_act
        SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
        SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE__ = max_position_embeddings
        SCREAMING_SNAKE_CASE__ = type_vocab_size
        SCREAMING_SNAKE_CASE__ = type_sequence_label_size
        SCREAMING_SNAKE_CASE__ = initializer_range
        SCREAMING_SNAKE_CASE__ = num_choices
        SCREAMING_SNAKE_CASE__ = rescale_embeddings
        SCREAMING_SNAKE_CASE__ = attention_type
        SCREAMING_SNAKE_CASE__ = use_bias
        SCREAMING_SNAKE_CASE__ = block_size
        SCREAMING_SNAKE_CASE__ = num_random_blocks
    def lowerCAmelCase_ ( self : Dict ) -> Tuple:
        # prepare_config_and_inputs: random ids/masks plus a BigBirdConfig.
        SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE__ = None
        if self.use_attention_mask:
            SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE__ = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE__ = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask
    def lowerCAmelCase_ ( self : Tuple ) -> List[Any]:
        # prepare_config_and_inputs_for_common: repackage as a kwargs dict.
        SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = config_and_inputs
        SCREAMING_SNAKE_CASE__ = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class lowerCamelCase (_SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Model test suite for all FlaxBigBird head classes.

    NOTE(review): the mixin base is obfuscated (``_SCREAMING_SNAKE_CASE``,
    undefined here; upstream: FlaxModelTesterMixin). The class attribute
    ``a`` is bound three times (tuple of model classes, then two booleans),
    so only the last survives; most test methods share the name
    ``lowerCAmelCase_`` and shadow one another; ``setUp`` references
    ``FlaxBigBirdModelTester`` which is not defined in this module (the
    tester class above was renamed to ``lowerCamelCase`` — and this class
    definition itself shadows it). Code left byte-identical in this pass.
    """
    a = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    a = False
    a = False
    def lowerCAmelCase_ ( self : List[Any] ) -> Any:
        # setUp: build the shared model tester.
        SCREAMING_SNAKE_CASE__ = FlaxBigBirdModelTester(self )
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def lowerCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
        super().test_from_pretrained_save_pretrained()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def lowerCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
        super().test_from_pretrained_with_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def lowerCAmelCase_ ( self : Dict ) -> List[Any]:
        super().test_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def lowerCAmelCase_ ( self : Optional[Any] ) -> List[str]:
        super().test_hidden_states_output()
    @slow
    def lowerCAmelCase_ ( self : List[str] ) -> Any:
        # Every model class must load from the reference checkpoint.
        for model_class_name in self.all_model_classes:
            SCREAMING_SNAKE_CASE__ = model_class_name.from_pretrained("google/bigbird-roberta-base" )
            self.assertIsNotNone(_snake_case )
    def lowerCAmelCase_ ( self : int ) -> Any:
        # Attention-output checks are opt-in for block-sparse attention.
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def lowerCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
        # JIT-compiled and eager forward passes must produce identical shapes.
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                SCREAMING_SNAKE_CASE__ = self._prepare_for_class(_snake_case , _snake_case )
                SCREAMING_SNAKE_CASE__ = model_class(_snake_case )
                @jax.jit
                def model_jitted(_snake_case : List[Any] , _snake_case : Optional[int]=None , **_snake_case : Tuple ):
                    return model(input_ids=_snake_case , attention_mask=_snake_case , **_snake_case )
                with self.subTest("JIT Enabled" ):
                    SCREAMING_SNAKE_CASE__ = model_jitted(**_snake_case ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        SCREAMING_SNAKE_CASE__ = model_jitted(**_snake_case ).to_tuple()
                self.assertEqual(len(_snake_case ) , len(_snake_case ) )
                for jitted_output, output in zip(_snake_case , _snake_case ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def lowerCAmelCase_ ( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : Union[str, Any]=1e-5 , _snake_case : Optional[Any]="outputs" , _snake_case : List[Any]=None ) -> Optional[int]:
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions" ):
            return
        else:
            super().check_pt_flax_outputs(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
| 538
|
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
_A = 'src/transformers'
# Matches is_xxx_available()
_A = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_A = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_A = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_A = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_A = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_A = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_A = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_A = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_A = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_A = re.compile(R'^\s*try:')
# Catches a line with else:
_A = re.compile(R'^\s*else:')
def find_backend(line):
    """Find one (or multiple) backend in a code line of an init.

    Returns None when the line is not an ``if not is_xxx_available()`` check,
    otherwise the sorted backend names joined with "_and_" (e.g. "tf_and_torch").
    The obfuscated original named this function ``SCREAMING_SNAKE_CASE`` although
    every caller below invokes it as ``find_backend``.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an __init__.py and parse, per backend, the objects declared in the
    `_import_structure` half and in the `TYPE_CHECKING` half.

    Returns a pair of dicts ``(import_dict_objects, type_hint_objects)`` mapping a
    backend name (or "none") to the list of object names declared for it, or
    ``None`` when the file is a traditional init without an ``_import_structure``.

    The obfuscated original reused one name for the file path, the list of lines,
    the current line and the parsed imports (e.g. ``len(<path>)`` where
    ``len(lines)`` was intended); distinct locals are restored here.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(R"\[([^\]]+)\]", content)
            for imp in imports:
                # strip the surrounding quotes of each object name
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            # 9 = 8 spaces + opening quote; -3 strips the closing `",\n`
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init parsed by ``parse_init``.

    Args:
        import_dict_objects: backend -> objects found in ``_import_structure``.
        type_hint_objects: backend -> objects found under ``TYPE_CHECKING``.

    Returns a list of human-readable error strings (empty when both halves agree).
    The obfuscated original declared both parameters with the same name
    (``__UpperCAmelCase``), which is a SyntaxError; the names are restored from
    the body, which already used them.
    """

    def find_duplicates(seq):
        # Objects listed more than once for the same backend.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk the transformers source tree and raise ValueError if any __init__.py
    declares different objects in its ``_import_structure`` and ``TYPE_CHECKING``
    halves.

    The obfuscated original took no argument yet walked ``__UpperCAmelCase``
    (a NameError); the intended root is the module constant PATH_TO_TRANSFORMERS.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file so the report is actionable.
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of top-level transformers submodules (dotted names),
    gathered by walking PATH_TO_TRANSFORMERS.

    Directories starting with "_" are pruned; empty leftover folders are skipped;
    for plain files only depth-1 modules (no dot after stripping ".py") are kept.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules deliberately absent from the main init's `_import_structure`.
# The obfuscated original bound this list to `_A` while check_submodules()
# reads it as IGNORE_SUBMODULES.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Verify every transformers submodule is registered in the main init.

    Raises ValueError listing any submodule that is neither in IGNORE_SUBMODULES
    nor a key of the main ``_import_structure``. (The original error message
    contained the typo "registed"; fixed to "registered".)
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(R"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
# Run both init-consistency checks when executed as a script (e.g. in CI).
if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 538
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
A : Tuple = logging.get_logger(__name__)
# NOTE(review): obfuscation reuses the name `A` for both the logger above and
# the checkpoint->config-URL map below, so the logger binding is immediately
# lost; presumably these were `logger` and a `*_PRETRAINED_CONFIG_ARCHIVE_MAP`.
A : Union[str, Any] = {
    '''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class a_(PretrainedConfig):
    r"""
    Configuration for a Wav2Vec2-style speech model.

    Stores hyper-parameters for the convolutional feature extractor, the
    transformer encoder, SpecAugment masking, the quantizer used during
    pretraining, the CTC / sequence-classification / XVector heads and the
    optional adapter.

    NOTE(review): the obfuscated original inherited from an undefined name `_a`
    (NameError at import) and declared the class attribute as `a`;
    `PretrainedConfig` — imported at the top of this file and targeted by the
    `super().__init__(..., pad_token_id=..., bos_token_id=..., eos_token_id=...)`
    call — and `model_type` restore the intended behaviour. The original
    `__init__` also reused `__UpperCamelCase` for every parameter (a
    SyntaxError); the names below are reconstructed from the attribute
    assignments in the body.
    """

    # Identifies this configuration family for auto-class dispatch.
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`." )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def UpperCamelCase_(self):
        # Product of all conv strides = total downsampling factor between raw
        # audio samples and encoder frames. NOTE(review): obfuscated property
        # name kept for interface stability; upstream calls it
        # `inputs_to_logits_ratio` — confirm before renaming.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 287
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the X-CLIP model package.
# NOTE(review): the obfuscated original bound this dict to `A`, then assigned the
# torch-only model list to a throwaway `A` instead of
# `_import_structure["modeling_x_clip"]`, and bound the `_LazyModule` to `A`
# instead of installing it in `sys.modules` — while the final line still read the
# undefined name `_import_structure`. All three are fixed below.
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

# The modeling objects are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 287
| 1
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    """Builds a tiny MaskFormer config and random inputs for the model tests below.

    NOTE(review): the obfuscated original named this class `__lowerCAmelCase`
    although the test suite instantiates it as `MaskFormerModelTester`; its
    `__init__` reused `lowerCAmelCase` for every parameter (a SyntaxError) and
    every method was called `A__` (each shadowing the previous) while callers use
    the descriptive names restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, pixel_mask, mask_labels, class_labels) with random data."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        # Random binary masks / labels per (batch, label).
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        """A deliberately tiny Swin backbone + DETR decoder configuration."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MaskFormer.

    NOTE(review): the obfuscated original listed `SCREAMING_SNAKE_CASE` twice as a
    base class (an undefined name, and duplicate bases are a TypeError) instead of
    the `ModelTesterMixin` / `PipelineTesterMixin` imported above, named every
    class attribute `_a`, and named every test method `A__` so only the last one
    survived. Names are restored below; the skip method names mirror the mixin
    tests they override — confirm `test_model_outputs_equivalence` against the
    current ModelTesterMixin.
    """

    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_output_hidden_state(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
# Absolute tolerance used by the slow integration tests below when comparing
# model outputs to reference tensors. (Obfuscated name `lowercase_` was never
# read anywhere; the tests expect a tolerance constant.)
TOLERANCE = 1e-4
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests.

    The obfuscated original named this function `a` although every integration
    test below calls it as `prepare_img`.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against released MaskFormer checkpoints.

    NOTE(review): obfuscation named this class `__lowerCAmelCase` (shadowing the
    common-test class above), named every test `A__` (so only the last ran), and
    replaced `torch_device` / the tolerance constant with the undefined name
    `lowerCAmelCase`. Restored below; `torch_device` is imported at the top of
    this file and `TOLERANCE` is the module constant defined above this class.
    """

    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco')
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1_088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1_088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff')
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1_088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1_333)), np.zeros((3, 800, 1_333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors='pt',
        )

        inputs['pixel_values'] = inputs['pixel_values'].to(torch_device)
        inputs['mask_labels'] = [el.to(torch_device) for el in inputs['mask_labels']]
        inputs['class_labels'] = [el.to(torch_device) for el in inputs['class_labels']]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 380
|
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
    """Unit tests for the knapsack implementation imported as ``k``.

    Fixes: the original bound every local to the single name ``_lowercase``
    (each assignment shadowing the last) and then passed the undefined name
    ``lowerCAmelCase`` to ``k.knapsack`` — a NameError at test time.

    NOTE(review): all three methods share the name ``A__``, so only the last
    definition survives on the class and none is discovered by unittest
    (no ``test_`` prefix); names are kept to preserve the external interface,
    but they should be given unique ``test_*`` names.
    """

    def A__ ( self ) -> List[str]:
        """Base cases: zero capacity yields zero profit."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
        # A single item that does not fit (weight 10 > capacity 0) also yields 0.
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def A__ ( self ) -> List[Any]:
        """Small case: optimum picks values 2 and 3 (weights 2 and 1) -> 5."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def A__ ( self ) -> str:
        """Classic textbook case: optimum is 100 + 120 = 220 within capacity 50."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
    # Run the knapsack test suite when this file is executed as a script.
    unittest.main()
| 380
| 1
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
    """Fixture/config builder for the TF ViTMAE tests (originally
    ``TFViTMAEModelTester`` — the name the test class below still instantiates).

    NOTE(review): obfuscation damage, kept byte-for-byte here: ``__init__``
    declares every parameter as ``__UpperCAmelCase`` (duplicate argument names —
    a SyntaxError), every ``self.<attr> = ...`` target became the throwaway
    local ``A`` (so nothing is stored on the instance), distinct method names
    were collapsed to ``snake_case`` (later defs shadow earlier ones), and
    return statements reference unresolved names (``config``, ``pixel_values``,
    ``labels``). Comments record the original intent.
    """

    def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=0.6 , __UpperCAmelCase=None , ) -> Optional[Any]:
        # Intended: stash every constructor argument on self (parent,
        # batch_size, image_size, patch_size, num_channels, is_training,
        # use_labels, hidden_size, ..., mask_ratio, scope).
        A : List[str] = parent
        A : Optional[int] = batch_size
        A : str = image_size
        A : Optional[Any] = patch_size
        A : Optional[int] = num_channels
        A : Union[str, Any] = is_training
        A : int = use_labels
        A : Union[str, Any] = hidden_size
        A : Optional[Any] = num_hidden_layers
        A : str = num_attention_heads
        A : str = intermediate_size
        A : Dict = hidden_act
        A : Optional[int] = hidden_dropout_prob
        A : Optional[int] = attention_probs_dropout_prob
        A : Optional[Any] = type_sequence_label_size
        A : List[str] = initializer_range
        A : Dict = mask_ratio
        A : str = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        A : Dict = (image_size // patch_size) ** 2
        A : int = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )

    def snake_case ( self ) -> int:
        # Intended ``prepare_config_and_inputs``: random pixel values (+ labels
        # when ``use_labels``) plus a fresh config.
        A : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        A : int = None
        if self.use_labels:
            A : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        A : Optional[int] = self.get_config()
        return config, pixel_values, labels

    def snake_case ( self ) -> List[str]:
        # Intended ``get_config``: mirror the tester dimensions into a ViTMAEConfig.
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )

    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
        # Intended ``create_and_check_model``: forward pass output-shape check.
        A : str = TFViTMAEModel(config=__UpperCAmelCase )
        A : Optional[Any] = model(__UpperCAmelCase , training=__UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
        # Intended ``create_and_check_for_pretraining``: logits shape checks,
        # including a greyscale (1-channel) variant.
        A : Dict = TFViTMAEForPreTraining(__UpperCAmelCase )
        A : str = model(__UpperCAmelCase , training=__UpperCAmelCase )
        # expected sequence length = num_patches
        A : Optional[Any] = (self.image_size // self.patch_size) ** 2
        A : str = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        A : str = 1
        A : Optional[Any] = TFViTMAEForPreTraining(__UpperCAmelCase )
        A : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        A : Optional[Any] = model(__UpperCAmelCase , training=__UpperCAmelCase )
        A : List[str] = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

    def snake_case ( self ) -> str:
        # Intended ``prepare_config_and_inputs_for_common``: split the tuple
        # into (config, {"pixel_values": ...}).
        A : Dict = self.prepare_config_and_inputs()
        ((A) , (A) , (A)) : Optional[Any] = config_and_inputs
        A : Dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common TF model tests for ViTMAE (base model and pretraining head).

    NOTE(review): obfuscation damage, kept byte-for-byte: the two mixin bases
    were both renamed ``_SCREAMING_SNAKE_CASE`` (undefined; originally
    ``TFModelTesterMixin`` and ``PipelineTesterMixin``), all test methods were
    renamed ``snake_case`` (later defs shadow earlier ones, and none is
    unittest-discoverable), results are bound to throwaway ``A`` locals, and
    ``TFViTMAEModelTester`` / ``__UpperCAmelCase`` are unresolved names.
    Comments record the intended behavior of each method.
    """

    # Class-level test configuration (originally all_model_classes,
    # pipeline_model_mapping and the test_* feature flags).
    UpperCAmelCase_ : Optional[Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    UpperCAmelCase_ : Optional[int] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
    UpperCAmelCase_ : Any = False
    UpperCAmelCase_ : List[Any] = False
    UpperCAmelCase_ : List[str] = False
    UpperCAmelCase_ : int = False

    def snake_case ( self ) -> Any:
        # Intended setUp: build the model tester and the config tester.
        A : Any = TFViTMAEModelTester(self )
        A : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )

    def snake_case ( self ) -> Tuple:
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
    def snake_case ( self ) -> Tuple:
        pass

    def snake_case ( self ) -> Optional[int]:
        # Input embeddings must be a keras layer; there are no output embeddings.
        A , A : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A : List[str] = model_class(__UpperCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            A : Any = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__UpperCAmelCase , tf.keras.layers.Layer ) )

    def snake_case ( self ) -> Dict:
        # The call signature must start with `pixel_values`.
        A , A : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A : str = model_class(__UpperCAmelCase )
            A : Optional[int] = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A : Union[str, Any] = [*signature.parameters.keys()]
            A : List[str] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __UpperCAmelCase )

    def snake_case ( self ) -> str:
        # Delegate the base-model forward shape check to the tester.
        A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCAmelCase )

    def snake_case ( self ) -> str:
        # Delegate the pretraining-head shape check to the tester.
        A : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )

    def snake_case ( self ) -> List[str]:
        # Keyword and positional calls must agree when the noise mask is fixed.
        # make the mask reproducible
        np.random.seed(2 )
        A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        A : Tuple = int((config.image_size // config.patch_size) ** 2 )
        A : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            A : Dict = model_class(__UpperCAmelCase )
            A : Dict = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
            A : List[Any] = model(__UpperCAmelCase , noise=__UpperCAmelCase )
            A : Union[str, Any] = copy.deepcopy(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
            A : Optional[Any] = model(**__UpperCAmelCase , noise=__UpperCAmelCase )
            A : Any = outputs_dict[0].numpy()
            A : Optional[Any] = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )

    def snake_case ( self ) -> Dict:
        # Numpy inputs must produce the same outputs as TF tensors.
        # make the mask reproducible
        np.random.seed(2 )
        A , A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        A : Dict = int((config.image_size // config.patch_size) ** 2 )
        A : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )

        def prepare_numpy_arrays(__UpperCAmelCase ):
            # Convert every TF tensor in the inputs dict to a numpy array.
            A : Dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(__UpperCAmelCase ):
                    A : int = v.numpy()
                else:
                    A : int = np.array(__UpperCAmelCase )
            return inputs_np_dict

        for model_class in self.all_model_classes:
            A : Dict = model_class(__UpperCAmelCase )
            A : List[str] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
            A : Optional[Any] = prepare_numpy_arrays(__UpperCAmelCase )
            A : Dict = model(__UpperCAmelCase , noise=__UpperCAmelCase )
            A : List[str] = model(**__UpperCAmelCase , noise=__UpperCAmelCase )
            self.assert_outputs_same(__UpperCAmelCase , __UpperCAmelCase )

    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
        # Override of check_pt_tf_models: inject the same fixed noise into both
        # frameworks so the random masking does not break PT/TF equivalence.
        # make masks reproducible
        np.random.seed(2 )
        A : Optional[Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
        A : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        A : List[str] = tf.constant(__UpperCAmelCase )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        A : Union[str, Any] = tf_noise
        super().check_pt_tf_models(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

    def snake_case ( self ) -> Any:
        # Every `...MainLayer` companion class must be keras-serializable:
        # build a functional model around it, save to H5 and reload.
        # make mask reproducible
        np.random.seed(2 )
        A , A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        A : str = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__ ),)
            for module_member_name in dir(__UpperCAmelCase )
            if module_member_name.endswith('''MainLayer''' )
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
            for module_member in (getattr(__UpperCAmelCase , __UpperCAmelCase ),)
            if isinstance(__UpperCAmelCase , __UpperCAmelCase )
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(__UpperCAmelCase , '''_keras_serializable''' , __UpperCAmelCase )
        }
        A : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
        A : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        A : List[Any] = tf.convert_to_tensor(__UpperCAmelCase )
        inputs_dict.update({'''noise''': noise} )
        for main_layer_class in tf_main_layer_classes:
            A : str = main_layer_class(__UpperCAmelCase )
            A : Tuple = {
                name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
            }
            A : Any = tf.keras.Model(__UpperCAmelCase , outputs=main_layer(__UpperCAmelCase ) )
            A : List[Any] = model(__UpperCAmelCase )
            with tempfile.TemporaryDirectory() as tmpdirname:
                A : Any = os.path.join(__UpperCAmelCase , '''keras_model.h5''' )
                model.save(__UpperCAmelCase )
                A : Optional[int] = tf.keras.models.load_model(
                    __UpperCAmelCase , custom_objects={main_layer_class.__name__: main_layer_class} )
                assert isinstance(__UpperCAmelCase , tf.keras.Model )
                A : Dict = model(__UpperCAmelCase )
                self.assert_outputs_same(__UpperCAmelCase , __UpperCAmelCase )

    @slow
    def snake_case ( self ) -> Tuple:
        # save_pretrained/from_pretrained round-trip must reproduce the outputs
        # (noise fixed so the random masking is deterministic).
        # make mask reproducible
        np.random.seed(2 )
        A , A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        A : Any = int((config.image_size // config.patch_size) ** 2 )
        A : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            A : Optional[Any] = model_class(__UpperCAmelCase )
            A : List[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
            A : Tuple = model(__UpperCAmelCase , noise=__UpperCAmelCase )
            if model_class.__name__ == "TFViTMAEModel":
                A : List[Any] = outputs.last_hidden_state.numpy()
                A : Dict = 0
            else:
                A : Tuple = outputs.logits.numpy()
                A : List[Any] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(__UpperCAmelCase , saved_model=__UpperCAmelCase )
                A : Tuple = model_class.from_pretrained(__UpperCAmelCase )
                A : Tuple = model(__UpperCAmelCase , noise=__UpperCAmelCase )
                if model_class.__name__ == "TFViTMAEModel":
                    A : int = after_outputs['''last_hidden_state'''].numpy()
                    A : List[Any] = 0
                else:
                    A : Union[str, Any] = after_outputs['''logits'''].numpy()
                    A : Any = 0
                A : List[Any] = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(__UpperCAmelCase , 1E-5 )

    def snake_case ( self ) -> List[Any]:
        # get_config/from_config round-trip plus weight transfer must reproduce
        # the outputs, and the config must be JSON-serializable for keras.
        # make mask reproducible
        np.random.seed(2 )
        A , A : str = self.model_tester.prepare_config_and_inputs_for_common()
        A : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
        A : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            A : int = model_class(__UpperCAmelCase )
            A : Tuple = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
            A : str = model(__UpperCAmelCase , noise=__UpperCAmelCase )
            A : int = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(__UpperCAmelCase )
            A : Any = model_class.from_config(model.get_config() )
            # make sure it also accepts a normal config
            A : Optional[Any] = model_class.from_config(model.config )
            A : int = new_model(__UpperCAmelCase )  # Build model
            new_model.set_weights(model.get_weights() )
            A : Optional[Any] = new_model(__UpperCAmelCase , noise=__UpperCAmelCase )
            self.assert_outputs_same(__UpperCAmelCase , __UpperCAmelCase )

    @unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.''' )
    def snake_case ( self ) -> Dict:
        pass

    @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
    def snake_case ( self ) -> Union[str, Any]:
        pass

    @slow
    def snake_case ( self ) -> Union[str, Any]:
        # Smoke test: a pretrained checkpoint must load successfully.
        A : Any = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(__UpperCAmelCase )
def snake_case__ ( ):
    """Load and return the standard COCO test-fixture image used by the
    integration tests.

    Fixes: the original bound the image to a throwaway local (``A``) and then
    returned the undefined name ``image`` — a NameError on every call.
    """
    img = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return img
@require_tf
@require_vision
class __lowercase ( unittest.TestCase ):
    """Slow integration test: TF ViTMAE pretraining forward pass against the
    ``facebook/vit-mae-base`` checkpoint with a fixed numpy noise mask.

    NOTE(review): both methods were renamed ``snake_case`` (the second shadows
    the first, which was the ``default_image_processor`` property), results are
    bound to throwaway ``A`` locals, and ``__UpperCAmelCase`` / ``model`` /
    ``outputs`` are unresolved names as written.
    """

    @cached_property
    def snake_case ( self ) -> str:
        # Intended ``default_image_processor`` property.
        return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None

    @slow
    def snake_case ( self ) -> List[Any]:
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        A : List[str] = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
        A : int = self.default_image_processor
        A : str = prepare_img()
        A : Any = image_processor(images=__UpperCAmelCase , return_tensors='''tf''' )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        A : Tuple = ViTMAEConfig()
        A : Optional[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        A : Any = np.random.uniform(size=(1, num_patches) )
        # forward pass
        A : Optional[int] = model(**__UpperCAmelCase , noise=__UpperCAmelCase )
        # verify the logits
        A : List[Any] = tf.convert_to_tensor([1, 1_96, 7_68] )
        self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
        A : Union[str, Any] = tf.convert_to_tensor(
            [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , __UpperCAmelCase , atol=1E-4 )
| 542
|
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __lowercase ( unittest.TestCase ):
    """Tests for ``transformers.utils.logging``: verbosity getters/setters,
    ``TRANSFORMERS_VERBOSITY`` env-var handling and ``warning_advice``.

    NOTE(review): obfuscation bound results to throwaway ``A`` locals and
    replaced several call arguments with the undefined name ``__UpperCAmelCase``
    (notably the ``logging.set_verbosity(...)`` restore calls and the
    ``os.getenv`` default), so the methods do not run as written; comments
    record the original intent.
    """

    def snake_case ( self ) -> Union[str, Any]:
        # set/get round-trip for each canonical verbosity level, restoring the
        # original level afterwards.
        A : Dict = logging.get_logger()
        # the current default level is logging.WARNING
        A : List[Any] = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(__UpperCAmelCase )

    def snake_case ( self ) -> str:
        # Warnings must be captured at WARNING level, suppressed at ERROR level,
        # and captured again once verbosity is restored.
        A : Any = logging.get_verbosity()
        A : Optional[Any] = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        A : Tuple = '''Testing 1, 2, 3'''
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(__UpperCAmelCase ) as cl:
                logger.warning(__UpperCAmelCase )
            self.assertEqual(cl.out , msg + '''\n''' )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(__UpperCAmelCase ) as cl:
            logger.warning(__UpperCAmelCase )
        self.assertEqual(cl.out , '''''' )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(__UpperCAmelCase ) as cl:
            logger.warning(__UpperCAmelCase )
        self.assertEqual(cl.out , msg + '''\n''' )
        # restore to the original level
        logging.set_verbosity(__UpperCAmelCase )

    @mockenv(TRANSFORMERS_VERBOSITY='''error''' )
    def snake_case ( self ) -> Optional[int]:
        # TRANSFORMERS_VERBOSITY env var must drive the internal verbosity.
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        A : int = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        A : Any = os.getenv('''TRANSFORMERS_VERBOSITY''' , __UpperCAmelCase )
        A : List[str] = logging.log_levels[env_level_str]
        A : Optional[int] = logging.get_verbosity()
        self.assertEqual(
            __UpperCAmelCase , __UpperCAmelCase , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , )
        # restore to the original level
        A : str = ''''''
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
    def snake_case ( self ) -> Optional[int]:
        # An unknown TRANSFORMERS_VERBOSITY value must be reported, not crash.
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        A : str = logging.logging.getLogger()
        with CaptureLogger(__UpperCAmelCase ) as cl:
            # this action activates the env var
            logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
        # no need to restore as nothing was changed

    def snake_case ( self ) -> Optional[int]:
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()
        A : Union[str, Any] = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        A : Optional[int] = '''Testing 1, 2, 3'''
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(__UpperCAmelCase ) as cl:
                logger.warning_advice(__UpperCAmelCase )
            self.assertEqual(cl.out , '''''' )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(__UpperCAmelCase ) as cl:
                logger.warning_advice(__UpperCAmelCase )
            self.assertEqual(cl.out , msg + '''\n''' )
def snake_case__ ( ):
    """Round-trip the global progress-bar switch: disabling and re-enabling
    must be reflected by ``are_progress_bars_disabled``."""
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 542
| 1
|
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class __lowercase( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase = nn.Linear(3 , 4 )
_lowerCAmelCase = nn.BatchNormad(4 )
_lowerCAmelCase = nn.Linear(4 , 5 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , _lowerCAmelCase : int ) -> Optional[Any]:
return self.lineara(self.batchnorm(self.lineara(_lowerCAmelCase ) ) )
class __lowercase( unittest.TestCase ):
    """Tests for accelerate's offloading utilities: ``offload_state_dict``,
    ``offload_weight`` / ``load_offloaded_weight``, ``OffloadedWeightsLoader``
    and ``extract_submodules_state_dict``.

    NOTE(review): every local was renamed ``_lowerCAmelCase`` (each assignment
    shadowing the previous one), so names like ``model``, ``tmp_dir``,
    ``index`` and ``weight_map`` are unresolved as written; ``ModelForTest`` is
    the nn.Module fixture defined above (itself renamed ``__lowercase``), and
    ``torch.floataa``/``torch.bfloataa`` are mangled dtype names. Comments
    record the original intent.
    """

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
        # offload_state_dict must write an index.json plus one .dat per tensor.
        _lowerCAmelCase = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_lowerCAmelCase , model.state_dict() )
            _lowerCAmelCase = os.path.join(_lowerCAmelCase , 'index.json' )
            self.assertTrue(os.path.isfile(_lowerCAmelCase ) )
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                _lowerCAmelCase = os.path.join(_lowerCAmelCase , F'''{key}.dat''' )
                self.assertTrue(os.path.isfile(_lowerCAmelCase ) )
            # TODO: add tests on the fact weights are properly loaded

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[Any]:
        # offload_weight / load_offloaded_weight round-trip for each dtype.
        _lowerCAmelCase = [torch.floataa, torch.floataa, torch.bfloataa]
        for dtype in dtypes:
            _lowerCAmelCase = torch.randn(2 , 3 , dtype=_lowerCAmelCase )
            with TemporaryDirectory() as tmp_dir:
                _lowerCAmelCase = offload_weight(_lowerCAmelCase , 'weight' , _lowerCAmelCase , {} )
                _lowerCAmelCase = os.path.join(_lowerCAmelCase , 'weight.dat' )
                self.assertTrue(os.path.isfile(_lowerCAmelCase ) )
                self.assertDictEqual(_lowerCAmelCase , {'weight': {'shape': [2, 3], 'dtype': str(_lowerCAmelCase ).split('.' )[1]}} )
                _lowerCAmelCase = load_offloaded_weight(_lowerCAmelCase , index['weight'] )
                self.assertTrue(torch.equal(_lowerCAmelCase , _lowerCAmelCase ) )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[str]:
        # OffloadedWeightsLoader must merge in-memory and on-disk weights and
        # drop duplicates, preserving every key/value of the full state dict.
        _lowerCAmelCase = ModelForTest()
        _lowerCAmelCase = model.state_dict()
        _lowerCAmelCase = {k: v for k, v in state_dict.items() if 'linear2' not in k}
        _lowerCAmelCase = {k: v for k, v in state_dict.items() if 'linear2' in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_lowerCAmelCase , _lowerCAmelCase )
            _lowerCAmelCase = OffloadedWeightsLoader(state_dict=_lowerCAmelCase , save_folder=_lowerCAmelCase )
            # Every key is there with the right value
            self.assertEqual(sorted(_lowerCAmelCase ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(_lowerCAmelCase , weight_map[key] ) )
        _lowerCAmelCase = {k: v for k, v in state_dict.items() if 'weight' in k}
        _lowerCAmelCase = {k: v for k, v in state_dict.items() if 'weight' not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_lowerCAmelCase , _lowerCAmelCase )
            _lowerCAmelCase = OffloadedWeightsLoader(state_dict=_lowerCAmelCase , save_folder=_lowerCAmelCase )
            # Every key is there with the right value
            self.assertEqual(sorted(_lowerCAmelCase ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(_lowerCAmelCase , weight_map[key] ) )
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_lowerCAmelCase , _lowerCAmelCase )
            # Duplicates are removed
            _lowerCAmelCase = OffloadedWeightsLoader(state_dict=_lowerCAmelCase , save_folder=_lowerCAmelCase )
            # Every key is there with the right value
            self.assertEqual(sorted(_lowerCAmelCase ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(_lowerCAmelCase , weight_map[key] ) )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> int:
        # extract_submodules_state_dict must match exact prefixes only
        # ('a.1' must not match 'a.10').
        _lowerCAmelCase = {'a.1': 0, 'a.10': 1, 'a.2': 2}
        _lowerCAmelCase = extract_submodules_state_dict(_lowerCAmelCase , ['a.1', 'a.2'] )
        self.assertDictEqual(_lowerCAmelCase , {'a.1': 0, 'a.2': 2} )
        _lowerCAmelCase = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
        _lowerCAmelCase = extract_submodules_state_dict(_lowerCAmelCase , ['a.1', 'a.2'] )
        self.assertDictEqual(_lowerCAmelCase , {'a.1.a': 0, 'a.2.a': 2} )
| 585
|
from math import isqrt
def _a ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
return all(number % divisor != 0 for divisor in range(2 , isqrt(__SCREAMING_SNAKE_CASE ) + 1 ) )
def _a ( __SCREAMING_SNAKE_CASE : int = 10**6 ):
"""simple docstring"""
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 7
while prime_candidate < max_prime:
primes_count += is_prime(__SCREAMING_SNAKE_CASE )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
    # Fixes: the original printed ``solution()``, but the solver above was
    # renamed ``_a`` during obfuscation, so this raised NameError.
    print(F"{_a() = }")
| 585
| 1
|
"""simple docstring"""
from typing import Any
import numpy as np
def lowercase_ ( _lowercase : np.ndarray ):
    """Return True iff the matrix equals its own conjugate transpose
    (i.e. is Hermitian).

    Fixes: the original compared the argument against an undefined name
    ``matrix`` — a NameError on every call.
    """
    return np.array_equal(_lowercase , _lowercase.conjugate().T )
def lowercase_ ( a : np.ndarray , v : np.ndarray ):
    """Return the Rayleigh quotient v† A v / (v† v) of matrix ``a`` and
    column vector ``v``.

    Fixes: the original declared both parameters with the same name
    ``_lowercase`` — a SyntaxError, so the module could not even be imported.
    Distinct names restore a valid (and readable) definition.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def lowercase_ ( ):
    """Smoke-test the Hermitian check and Rayleigh quotient on two samples.

    Fixes: every function in this module was renamed ``lowercase_``, so the
    helpers this routine originally called (``is_hermitian`` and
    ``rayleigh_quotient``) are shadowed by this very definition and raised
    NameError; local copies keep the demo runnable on its own.
    """
    def is_hermitian(matrix: np.ndarray) -> bool:
        return np.array_equal(matrix, matrix.conjugate().T)

    def rayleigh_quotient(a: np.ndarray, v: np.ndarray):
        v_star = v.conjugate().T
        return (v_star.dot(a).dot(v)) / (v_star.dot(v))

    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a), F"""{a} is not hermitian."""
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a), F"""{a} is not hermitian."""
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fixes: the original called ``tests()``, but that function was renamed
    # ``lowercase_`` during obfuscation, so this raised NameError.
    lowercase_()
| 595
|
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
# NOTE(review): the three module-level names below were all collapsed to
# ``snake_case_`` by obfuscation, so each assignment shadows the previous one
# (originally ``logger``, ``MAPPING`` and ``TOP_LEVEL_KEYS``); functions below
# that reference the original names cannot resolve them as written.
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
# fairseq/unilm WavLM parameter-name fragment -> HF WavLM parameter path;
# "*" is a placeholder for the encoder layer index, filled in during porting.
snake_case_ : List[Any] = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
    """self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
    """self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """ctc_proj""",
    """mask_emb""": """masked_spec_embed""",
}
# Weights that live at the top level of the HF model (no encoder layer index).
snake_case_ : List[str] = [
    """ctc_proj""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
]
def lowercase_ ( hf_pointer , key , value , full_name , weight_type ):
    """Copy ``value`` into the attribute of ``hf_pointer`` addressed by the
    dotted path ``key``, after validating its shape.

    Fixes: the original declared all five parameters with the same name
    ``_lowercase`` (a SyntaxError) and bound the tensor to throwaway
    ``UpperCAmelCase`` locals instead of writing the module's ``.data`` fields,
    so nothing was ever copied. Parameter order follows the original
    conversion script: (hf_pointer, key, value, full_name, weight_type).

    NOTE(review): ``logger`` is unresolved at module level as written (the
    logger was bound to ``snake_case_`` and then shadowed) — restore the
    module-level names for this logging call to work.
    """
    # Descend to the addressed sub-module/parameter.
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    # Validate the destination shape before writing anything.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowercase_ ( _lowercase : Optional[Any] , _lowercase : Dict ):
    '''Port every tensor of a fairseq WavLM state dict into the HF model.

    NOTE(review): obfuscation damage, kept byte-for-byte: the two parameters
    share the name ``_lowercase`` (a SyntaxError — originally
    ``fairseq_model`` and ``hf_model``), results are bound to throwaway
    ``UpperCAmelCase`` locals, and the helpers ``load_conv_layer`` /
    ``set_recursively`` plus the constants ``MAPPING`` / ``TOP_LEVEL_KEYS`` and
    ``logger`` are unresolved because they too were renamed. Comments record
    the intended flow.
    '''
    # Intended: collect unused weight names, walk the fairseq state dict, and
    # route conv weights to the feature extractor and everything else through
    # the MAPPING table via set_recursively.
    UpperCAmelCase : Dict = []
    UpperCAmelCase : Any = fairseq_model.state_dict()
    UpperCAmelCase : Tuple = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        UpperCAmelCase : List[Any] = False
        if "conv_layers" in name:
            load_conv_layer(
                _lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == "group" , )
            UpperCAmelCase : Union[str, Any] = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    UpperCAmelCase : Tuple = True
                    if "*" in mapped_key:
                        # Replace the "*" placeholder with the encoder layer index.
                        UpperCAmelCase : Dict = name.split(_lowercase )[0].split("." )[-2]
                        UpperCAmelCase : Any = mapped_key.replace("*" , _lowercase )
                    if "weight_g" in name:
                        UpperCAmelCase : Any = "weight_g"
                    elif "weight_v" in name:
                        UpperCAmelCase : List[Any] = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        UpperCAmelCase : Union[str, Any] = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        UpperCAmelCase : List[Any] = "weight"
                    else:
                        UpperCAmelCase : Optional[int] = None
                    set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
                continue
        if not is_used:
            unused_weights.append(_lowercase )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def lowercase_ ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """Copy one fairseq ``conv_layers.*`` tensor into the HF feature extractor,
    or record the name in ``unused_weights``.

    Fixes: the original declared all five parameters with the same name
    ``_lowercase`` (a SyntaxError) and bound tensors to throwaway locals
    instead of writing ``.data``, so nothing was copied. Parameter order
    follows the original script: (full_name, value, feature_extractor,
    unused_weights, use_group_norm).

    Routing (from the name ``<layer_id>.<type_id>.<rest>``): type_id 0 is the
    conv weight/bias; type_id 2 is a layer-norm weight/bias where the
    extractor actually has one; anything else is left unused.

    NOTE(review): ``logger`` is unresolved at module level as written (bound
    to ``snake_case_`` and shadowed).
    """
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Convert a fairseq WavLM checkpoint into a HF WavLMModel and save it.

    Args:
        checkpoint_path: path to the fairseq ``.pt`` checkpoint.
        pytorch_dump_folder_path: output folder for ``save_pretrained``.
        config_path: optional path to an existing HF ``config.json``.
    """
    # Original signature repeated `_lowercase` (a SyntaxError); the body also
    # referenced `model`/`hf_wavlm` that were never assigned.  Renamed to match
    # the call in the __main__ block below.
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint["cfg"] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint["model"] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # Local import keeps this block self-contained; the file-top imports are
    # outside this chunk.
    import argparse

    # Original bound the parser and parsed args to `snake_case_` and then read
    # undefined `parser`/`args`; names restored.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 595
| 1
|
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError("""Input must be an integer""" )
if input_num <= 0:
raise ValueError("""Input must be positive""" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 711
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """A node of a circular singly linked list."""

    def __init__(self, data):
        self.data = data
        self.next = None


class CircularLinkedList:
    """Circular singly linked list with head and tail pointers.

    The original code bound every attribute to a throwaway module-style name
    (``SCREAMING_SNAKE_CASE__``) instead of ``self`` and mangled every method
    name to ``__a``, while the test below calls ``insert_nth`` etc. — both the
    class name and the public method names are restored here.
    """

    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self):
        """Yield each node's data exactly once, starting from the head."""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:  # completed one full loop
                break

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data):
        """Append data at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data):
        """Prepend data at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index, data):
        """Insert data before position ``index`` (0 <= index <= len)."""
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        """Remove and return the first element."""
        return self.delete_nth(0)

    def delete_tail(self):
        """Remove and return the last element."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        """Remove and return the element at ``index`` (0 <= index < len)."""
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self):
        return len(self) == 0
def __SCREAMING_SNAKE_CASE() -> None:
    """Smoke-test CircularLinkedList: empty-list errors, inserts and deletes."""
    # The original bound the list to a throwaway name and then read the unbound
    # names `circular_linked_list` / `__UpperCamelCase`; a single local is used.
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 379
| 0
|
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class a ( _SCREAMING_SNAKE_CASE ):
    """Config-test mixin: checks the config exposes Levit-style attributes."""

    def __A ( self ):
        # The original bound the config to `_UpperCAmelCase` and then inspected
        # the unbound name `snake_case_`; a single local fixes the NameError.
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , "hidden_sizes" ) )
        self.parent.assertTrue(hasattr(config , "num_attention_heads" ) )
class a :
    """Builds LevitConfig objects and random model inputs for the Levit tests.

    NOTE(review): identifiers in this class look machine-mangled (`a`,
    `snake_case_`, `_UpperCAmelCase`): the signatures repeat the same
    parameter name (invalid Python) and bodies read names that are never
    bound. Code is kept token-for-token pending restoration of real names.
    """

    def __init__( self , snake_case_ , snake_case_=13 , snake_case_=64 , snake_case_=3 , snake_case_=3 , snake_case_=2 , snake_case_=1 , snake_case_=16 , snake_case_=[128, 256, 384] , snake_case_=[4, 6, 8] , snake_case_=[2, 3, 4] , snake_case_=[16, 16, 16] , snake_case_=0 , snake_case_=[2, 2, 2] , snake_case_=[2, 2, 2] , snake_case_=0.02 , snake_case_=True , snake_case_=True , snake_case_=2 , ) -> List[str]:
        # Presumably: parent, batch_size, image_size, num_channels, kernel_size,
        # stride, padding, patch_size, hidden_sizes, num_attention_heads, depths,
        # key_dim, drop_path_rate, mlp_ratio, attention_ratio, initializer_range,
        # is_training, use_labels, num_labels — TODO confirm against upstream.
        _UpperCAmelCase = parent
        _UpperCAmelCase = batch_size
        _UpperCAmelCase = image_size
        _UpperCAmelCase = num_channels
        _UpperCAmelCase = kernel_size
        _UpperCAmelCase = stride
        _UpperCAmelCase = padding
        _UpperCAmelCase = hidden_sizes
        _UpperCAmelCase = num_attention_heads
        _UpperCAmelCase = depths
        _UpperCAmelCase = key_dim
        _UpperCAmelCase = drop_path_rate
        _UpperCAmelCase = patch_size
        _UpperCAmelCase = attention_ratio
        _UpperCAmelCase = mlp_ratio
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        _UpperCAmelCase = is_training
        _UpperCAmelCase = use_labels
        _UpperCAmelCase = num_labels
        _UpperCAmelCase = initializer_range

    def __A ( self ) -> List[str]:
        """Create random pixel values (plus labels if use_labels) and a config."""
        _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _UpperCAmelCase = None
        if self.use_labels:
            _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
        _UpperCAmelCase = self.get_config()
        return config, pixel_values, labels

    def __A ( self ) -> Union[str, Any]:
        """Build a LevitConfig from the tester's hyperparameters."""
        return LevitConfig(
            image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )

    def __A ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
        """Run LevitModel and check the last hidden state shape."""
        _UpperCAmelCase = LevitModel(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        _UpperCAmelCase = model(snake_case_ )
        _UpperCAmelCase = (self.image_size, self.image_size)
        _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1]
        # Four conv stages each shrink the spatial dims by the conv formula.
        for _ in range(4 ):
            _UpperCAmelCase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            _UpperCAmelCase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )

    def __A ( self , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
        """Run LevitForImageClassification and check the logits shape."""
        _UpperCAmelCase = self.num_labels
        _UpperCAmelCase = LevitForImageClassification(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        _UpperCAmelCase = model(snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __A ( self ) -> Dict:
        """Return (config, inputs_dict) for the common test mixin."""
        _UpperCAmelCase = self.prepare_config_and_inputs()
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
        _UpperCAmelCase = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ):
    """Common model tests for Levit (model + classification heads).

    NOTE(review): identifiers here appear machine-mangled: every method is
    named `__A` (later defs shadow earlier ones), several signatures repeat
    the parameter name `snake_case_` (invalid Python), and bodies bind to
    `_UpperCAmelCase` but read other names. Kept token-for-token.
    """

    # all_model_classes / pipeline_model_mapping / test flags, presumably —
    # the attribute names were mangled to `A__`.
    A__ : Union[str, Any] = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    A__ : List[Any] = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    A__ : List[Any] = False
    A__ : Union[str, Any] = False
    A__ : str = False
    A__ : Any = False
    A__ : int = False

    def __A ( self ) -> int:
        """Set up the model tester and config tester."""
        _UpperCAmelCase = LevitModelTester(self )
        _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )

    def __A ( self ) -> List[str]:
        """Run the standard config round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __A ( self ) -> Dict:
        return

    @unittest.skip(reason="Levit does not use inputs_embeds" )
    def __A ( self ) -> Tuple:
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings" )
    def __A ( self ) -> Optional[int]:
        pass

    @unittest.skip(reason="Levit does not output attentions" )
    def __A ( self ) -> Dict:
        pass

    def __A ( self ) -> Any:
        """Check the forward signature starts with `pixel_values`."""
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = model_class(snake_case_ )
            _UpperCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCAmelCase = [*signature.parameters.keys()]
            _UpperCAmelCase = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , snake_case_ )

    def __A ( self ) -> str:
        """Check the number and shapes of the returned hidden states."""
        def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ):
            _UpperCAmelCase = model_class(snake_case_ )
            model.to(snake_case_ )
            model.eval()
            with torch.no_grad():
                _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
            _UpperCAmelCase = outputs.hidden_states
            _UpperCAmelCase = len(self.model_tester.depths ) + 1
            self.assertEqual(len(snake_case_ ) , snake_case_ )
            _UpperCAmelCase = (self.model_tester.image_size, self.model_tester.image_size)
            _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1]
            for _ in range(4 ):
                _UpperCAmelCase = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                _UpperCAmelCase = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )

        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = True
            check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _UpperCAmelCase = True
            check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def __A ( self ) -> Optional[Any]:
        pass

    def __A ( self , snake_case_ , snake_case_ , snake_case_=False ) -> Any:
        """Drop labels for the teacher model, which is inference-only."""
        _UpperCAmelCase = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def __A ( self ) -> List[str]:
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case_ )

    def __A ( self ) -> int:
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case_ )

    def __A ( self ) -> Optional[Any]:
        """Verify a training step backpropagates for trainable classes."""
        if not self.model_tester.is_training:
            return
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        _UpperCAmelCase = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(snake_case_ )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            _UpperCAmelCase = model_class(snake_case_ )
            model.to(snake_case_ )
            model.train()
            _UpperCAmelCase = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
            _UpperCAmelCase = model(**snake_case_ ).loss
            loss.backward()

    def __A ( self ) -> Tuple:
        """Verify training works with gradient checkpointing enabled."""
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        _UpperCAmelCase = False
        _UpperCAmelCase = True
        for model_class in self.all_model_classes:
            if model_class in get_values(snake_case_ ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            _UpperCAmelCase = model_class(snake_case_ )
            model.gradient_checkpointing_enable()
            model.to(snake_case_ )
            model.train()
            _UpperCAmelCase = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
            _UpperCAmelCase = model(**snake_case_ ).loss
            loss.backward()

    def __A ( self ) -> str:
        """Check each classification problem type trains without size warnings."""
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        _UpperCAmelCase = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(snake_case_ ),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
                    _UpperCAmelCase = problem_type["title"]
                    _UpperCAmelCase = problem_type["num_labels"]
                    _UpperCAmelCase = model_class(snake_case_ )
                    model.to(snake_case_ )
                    model.train()
                    _UpperCAmelCase = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
                    if problem_type["num_labels"] > 1:
                        _UpperCAmelCase = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
                    _UpperCAmelCase = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=snake_case_ ) as warning_list:
                        _UpperCAmelCase = model(**snake_case_ ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
                    loss.backward()

    @slow
    def __A ( self ) -> Tuple:
        """Check a pretrained checkpoint can be loaded."""
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCAmelCase = LevitModel.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
def A__ ():
    """Load the COCO test fixture image used by the integration tests."""
    # Bug fix: the original bound the opened image to a throwaway name and
    # then returned the unbound name `image`.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
    """Integration test: run a pretrained Levit teacher model on a fixture image.

    NOTE(review): both methods are mangled to `__A`, so the second definition
    shadows the first (the cached_property is unreachable); bodies bind to
    `_UpperCAmelCase` but read other names. Kept token-for-token.
    """

    @cached_property
    def __A ( self ) -> Tuple:
        """Image processor for the first pretrained Levit checkpoint."""
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def __A ( self ) -> Tuple:
        """Forward a fixture image and compare the first logits to references."""
        _UpperCAmelCase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            snake_case_ )
        _UpperCAmelCase = self.default_image_processor
        _UpperCAmelCase = prepare_img()
        _UpperCAmelCase = image_processor(images=snake_case_ , return_tensors="pt" ).to(snake_case_ )
        # forward pass
        with torch.no_grad():
            _UpperCAmelCase = model(**snake_case_ )
        # verify the logits
        _UpperCAmelCase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , snake_case_ )
        _UpperCAmelCase = torch.tensor([1.04_48, -0.37_45, -1.83_17] ).to(snake_case_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1e-4 ) )
| 426
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
    """Builds SwiftFormerConfig objects and random inputs for the model tests.

    NOTE(review): identifiers look machine-mangled (`a`, `snake_case_`,
    `_UpperCAmelCase`): signatures repeat the same parameter name (invalid
    Python) and bodies read names that are never bound. Kept token-for-token.
    """

    def __init__( self , snake_case_ , snake_case_=13 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=0.1 , snake_case_=0.1 , snake_case_=224 , snake_case_=1000 , snake_case_=[3, 3, 6, 4] , snake_case_=[48, 56, 112, 220] , ) -> Any:
        # Presumably: parent, batch_size, num_channels, is_training, use_labels,
        # hidden_dropout_prob, attention_probs_dropout_prob, image_size,
        # num_labels, layer_depths, embed_dims — TODO confirm against upstream.
        _UpperCAmelCase = parent
        _UpperCAmelCase = batch_size
        _UpperCAmelCase = num_channels
        _UpperCAmelCase = is_training
        _UpperCAmelCase = use_labels
        _UpperCAmelCase = hidden_dropout_prob
        _UpperCAmelCase = attention_probs_dropout_prob
        _UpperCAmelCase = num_labels
        _UpperCAmelCase = image_size
        _UpperCAmelCase = layer_depths
        _UpperCAmelCase = embed_dims

    def __A ( self ) -> int:
        """Create random pixel values (plus labels if use_labels) and a config."""
        _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _UpperCAmelCase = None
        if self.use_labels:
            _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
        _UpperCAmelCase = self.get_config()
        return config, pixel_values, labels

    def __A ( self ) -> Tuple:
        """Build a SwiftFormerConfig from the tester's hyperparameters."""
        return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=snake_case_ , layer_scale_init_value=1e-5 , )

    def __A ( self , snake_case_ , snake_case_ , snake_case_ ) -> int:
        """Run SwiftFormerModel and check the last hidden state shape."""
        _UpperCAmelCase = SwiftFormerModel(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        _UpperCAmelCase = model(snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )

    def __A ( self , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
        """Run the classification head with and without labels."""
        _UpperCAmelCase = self.num_labels
        _UpperCAmelCase = SwiftFormerForImageClassification(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        _UpperCAmelCase = model(snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        _UpperCAmelCase = SwiftFormerForImageClassification(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _UpperCAmelCase = model(snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __A ( self ) -> List[str]:
        """Return (config, inputs_dict) for the common test mixin."""
        ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = self.prepare_config_and_inputs()
        _UpperCAmelCase = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ):
    """Common model tests for SwiftFormer (model + classification head).

    NOTE(review): identifiers appear machine-mangled: every method is named
    `__A` (later defs shadow earlier ones) and bodies bind `_UpperCAmelCase`
    but read other names. Kept token-for-token.
    """

    # all_model_classes / pipeline_model_mapping / test flags, presumably —
    # the attribute names were mangled to `A__`.
    A__ : int = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    A__ : Dict = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    A__ : List[str] = False
    A__ : Optional[Any] = False
    A__ : Any = False
    A__ : Union[str, Any] = False
    A__ : str = False

    def __A ( self ) -> int:
        """Set up the model tester and config tester."""
        _UpperCAmelCase = SwiftFormerModelTester(self )
        _UpperCAmelCase = ConfigTester(
            self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )

    def __A ( self ) -> str:
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
    def __A ( self ) -> List[Any]:
        pass

    def __A ( self ) -> List[Any]:
        """Output embeddings should be absent or a plain Linear layer."""
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = model_class(snake_case_ )
            _UpperCAmelCase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )

    def __A ( self ) -> Dict:
        """Check the forward signature starts with `pixel_values`."""
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = model_class(snake_case_ )
            _UpperCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCAmelCase = [*signature.parameters.keys()]
            _UpperCAmelCase = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , snake_case_ )

    def __A ( self ) -> str:
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case_ )

    def __A ( self ) -> Union[str, Any]:
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case_ )

    @slow
    def __A ( self ) -> Union[str, Any]:
        """Check a pretrained checkpoint can be loaded."""
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCAmelCase = SwiftFormerModel.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )

    @unittest.skip(reason="SwiftFormer does not output attentions" )
    def __A ( self ) -> Optional[Any]:
        pass

    def __A ( self ) -> Tuple:
        """Check the number and shapes of the returned hidden states."""
        def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ):
            _UpperCAmelCase = model_class(snake_case_ )
            model.to(snake_case_ )
            model.eval()
            with torch.no_grad():
                _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
            _UpperCAmelCase = outputs.hidden_states
            _UpperCAmelCase = 8
            self.assertEqual(len(snake_case_ ) , snake_case_ )  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(snake_case_ ) ):
                self.assertEqual(
                    hidden_states[i].shape , torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )

        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = True
            check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _UpperCAmelCase = True
            check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )

    def __A ( self ) -> int:
        """Zero the init ranges and verify parameters initialize to 0 or 1."""
        def _config_zero_init(snake_case_ ):
            _UpperCAmelCase = copy.deepcopy(snake_case_ )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(snake_case_ , snake_case_ , 1e-1_0 )
                if isinstance(getattr(snake_case_ , snake_case_ , snake_case_ ) , snake_case_ ):
                    _UpperCAmelCase = _config_zero_init(getattr(snake_case_ , snake_case_ ) )
                    setattr(snake_case_ , snake_case_ , snake_case_ )
            return configs_no_init

        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        _UpperCAmelCase = _config_zero_init(snake_case_ )
        for model_class in self.all_model_classes:
            _UpperCAmelCase = model_class(config=snake_case_ )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def __A ( self ) -> Optional[Any]:
        pass
def A__ ():
    """Load the COCO test fixture image used by the integration tests."""
    # Bug fix: the original bound the opened image to a throwaway name and
    # then returned the unbound name `image`.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
    """Integration test: run pretrained SwiftFormer-XS on a fixture image.

    NOTE(review): both methods are mangled to `__A`, so the second definition
    shadows the first (the cached_property is unreachable); bodies bind to
    `_UpperCAmelCase` but read other names. Kept token-for-token.
    """

    @cached_property
    def __A ( self ) -> List[str]:
        """ViT image processor for swiftformer-xs (None without vision deps)."""
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None

    @slow
    def __A ( self ) -> Optional[Any]:
        """Forward a fixture image and compare the first logits to references."""
        _UpperCAmelCase = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(snake_case_ )
        _UpperCAmelCase = self.default_image_processor
        _UpperCAmelCase = prepare_img()
        _UpperCAmelCase = image_processor(images=snake_case_ , return_tensors="pt" ).to(snake_case_ )
        # forward pass
        with torch.no_grad():
            _UpperCAmelCase = model(**snake_case_ )
        # verify the logits
        _UpperCAmelCase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , snake_case_ )
        _UpperCAmelCase = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(snake_case_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1e-4 ) )
| 426
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger and the pretrained-config archive map.
# NOTE(review): both assignments were machine-mangled to the same throwaway
# name `snake_case_` (the second shadows the first); nothing in this chunk
# reads either binding — TODO restore the original names (logger / config map).
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : Tuple = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class __a (BackboneConfigMixin , PretrainedConfig ):
    """Configuration class for ConvNeXTV2 models.

    Bug fixes: the original inherited from `lowerCamelCase` twice (an undefined
    name, and a duplicate base is a TypeError anyway) — the two imported base
    classes are used instead; the class attribute was mangled to `__a` (restored
    to `model_type`, which PretrainedConfig reads); the constructor repeated
    the parameter name `__magic_name__` (a SyntaxError) and never stored the
    values on `self`, although `self.depths` is read below.
    """

    model_type = "convnextv2"

    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-12 , drop_path_rate=0.0 , image_size=224 , out_features=None , out_indices=None , **kwargs ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # Defaults match the ConvNeXTV2-tiny architecture.
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 644
|
'''simple docstring'''
def lowerCamelCase_(SCREAMING_SNAKE_CASE__: int) -> str:
    """Return the two's-complement binary string of a non-positive integer.

    >>> lowerCamelCase_(0)
    '0b0'
    >>> lowerCamelCase_(-1)
    '0b11'
    >>> lowerCamelCase_(-5)
    '0b1011'
    """
    # Bug fix: the original assigned the intermediate values to throwaway
    # names and then read the unbound names `binary_number_length` /
    # `twos_complement_number`.
    if SCREAMING_SNAKE_CASE__ > 0:
        raise ValueError("input must be a negative integer")
    # Width of |n| in bits (bin(-n) starts with '-0b').
    binary_number_length = len(bin(SCREAMING_SNAKE_CASE__)[3:])
    twos_complement_number = bin(abs(SCREAMING_SNAKE_CASE__) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if SCREAMING_SNAKE_CASE__ < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 644
| 1
|
def lowerCAmelCase_(__a) -> list:
    """Sort the list in place with cocktail shaker sort and return it.

    Bug fix: the original read the unbound names `unsorted` and `swapped`
    (assignments were mangled to throwaway names) and used the list itself
    as a range bound instead of the shrinking index `i`.
    """
    for i in range(len(__a) - 1, 0, -1):
        swapped = False
        # Backward pass: bubble the smallest of __a[:i+1] toward the front.
        for j in range(i, 0, -1):
            if __a[j] < __a[j - 1]:
                __a[j - 1], __a[j] = __a[j], __a[j - 1]
                swapped = True
        # Forward pass: bubble the largest toward position i.
        for j in range(i):
            if __a[j] > __a[j + 1]:
                __a[j + 1], __a[j] = __a[j], __a[j + 1]
                swapped = True
        if not swapped:  # already sorted — stop early
            break
    return __a
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: the input was bound to a throwaway name while the code read the
    # unbound names `user_input` / `unsorted`, and the print referenced a
    # nonexistent `cocktail_shaker_sort` (the sort above is `lowerCAmelCase_`).
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{lowerCAmelCase_(unsorted) = }")
| 59
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    """Builds EfficientFormer configs and dummy inputs for the TF model tests.

    NOTE(review): reconstructed from a corrupted block whose ``__init__``
    parameters were all renamed to the duplicate ``lowerCamelCase`` (a
    SyntaxError) and whose attributes were never bound to ``self``.  The
    parameter names below are mapped onto the corrupted signature's default
    values by the order of the attribute assignments — confirm the mapping
    against the upstream transformers test file.  The class is renamed to
    ``TFEfficientFormerModelTester`` because that is the name the test class
    below instantiates.
    """

    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        embed_dim: int = 3,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        mlp_expansion_ratio: int = 2,
        resolution: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        # The corrupted original computed this as embed_dim + 1; presumably
        # the flattened feature map plus one extra token — TODO confirm.
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio

    def prepare_config_and_inputs(self):
        """Return a config plus random pixel values and (optional) labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build an EfficientFormerConfig from this tester's hyper-parameters."""
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run the bare model and check the last hidden state shape."""
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Run the classification head, including a greyscale-input check."""
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapter used by the shared mixin tests: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    """Common-suite tests for the TF EfficientFormer models.

    NOTE(review): this block is machine-corrupted and will not parse or run
    as-is.  The base classes were renamed to the undefined
    ``__UpperCamelCase`` (presumably ``TFModelTesterMixin`` and
    ``PipelineTesterMixin`` imported above); every method is named ``A_``,
    so later definitions silently shadow earlier ones; several signatures
    repeat the parameter name ``lowerCamelCase`` (a SyntaxError); and most
    assignment targets were renamed to ``snake_case__`` while later
    references keep the original local names.  Restore the identifiers from
    the upstream transformers test file before use.
    """
    # Presumably ``all_model_classes`` in the original.
    _A : str = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    # Presumably ``pipeline_model_mapping`` in the original.
    _A : List[str] = (
        {
            'feature-extraction': TFEfficientFormerModel,
            'image-classification': (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    # Common-test feature flags (e.g. test_pruning / resize-embeddings /
    # head-masking switches upstream) — original names lost in corruption.
    _A : Optional[Any] = False
    _A : List[Any] = False
    _A : Tuple = False
    _A : List[Any] = False
    _A : Any = False
    # setUp: build the model tester and a ConfigTester for the config suite.
    def A_ ( self ):
        snake_case__ = TFEfficientFormerModelTester(self )
        snake_case__ = ConfigTester(
            self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
    # test_config: run the shared config sanity checks.
    def A_ ( self ):
        self.config_tester.run_common_tests()
    # Skipped: model takes pixel_values, not inputs_embeds.
    @unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
    def A_ ( self ):
        pass
    # Skipped: no tied input/output embeddings for a vision backbone.
    @unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
    def A_ ( self ):
        pass
    # test_forward_signature: first positional arg of call() must be pixel_values.
    def A_ ( self ):
        snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case__ = model_class(lowerCamelCase )
            snake_case__ = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case__ = [*signature.parameters.keys()]
            snake_case__ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowerCamelCase )
    # test_hidden_states_output: shape/count checks of returned hidden states.
    def A_ ( self ):
        def check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
            snake_case__ = model_class(lowerCamelCase )
            snake_case__ = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
            snake_case__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            snake_case__ = getattr(
                self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
            if hasattr(self.model_tester , "encoder_seq_length" ):
                snake_case__ = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
                    snake_case__ = seq_length * self.model_tester.chunk_length
            else:
                snake_case__ = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                snake_case__ = outputs.decoder_hidden_states
                # NOTE(review): "asseretIsInstance" is a typo even upstream-style
                # code would reject at runtime; likely assertIsInstance.
                self.asseretIsInstance(lowerCamelCase , (list, tuple) )
                self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
                snake_case__ = getattr(self.model_tester , "seq_length" , lowerCamelCase )
                snake_case__ = getattr(self.model_tester , "decoder_seq_length" , lowerCamelCase )
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
        snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case__ = True
            check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            snake_case__ = True
            check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
    # _prepare_for_class: drop labels for the teacher model, which takes none.
    def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ):
        snake_case__ = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    # test_model: delegate to the tester's bare-model check.
    def A_ ( self ):
        snake_case__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase )
    # Skipped upstream: masked image modeling not implemented.
    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
    def A_ ( self ):
        snake_case__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
    # test_for_image_classification: delegate to the tester's head check.
    def A_ ( self ):
        snake_case__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
    # test_model_from_pretrained: smoke-load the first published checkpoint.
    @slow
    def A_ ( self ):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case__ = TFEfficientFormerModel.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
    # test_attention_outputs: count/shape checks of returned attentions.
    def A_ ( self ):
        snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ = True
        snake_case__ = getattr(self.model_tester , "seq_length" , lowerCamelCase )
        snake_case__ = getattr(self.model_tester , "encoder_seq_length" , lowerCamelCase )
        snake_case__ = getattr(self.model_tester , "key_length" , lowerCamelCase )
        snake_case__ = getattr(self.model_tester , "chunk_length" , lowerCamelCase )
        if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
            snake_case__ = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            snake_case__ = True
            snake_case__ = False
            snake_case__ = True
            snake_case__ = model_class(lowerCamelCase )
            snake_case__ = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
            snake_case__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_attention_outputs )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            snake_case__ = True
            snake_case__ = model_class(lowerCamelCase )
            snake_case__ = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
            snake_case__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_attention_outputs )
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
    # test_compile_tf_model (simplified): build symbolic Keras inputs and
    # run the model functionally to catch shape-flexibility bugs.
    def A_ ( self ):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            snake_case__ = model_class(lowerCamelCase )
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            snake_case__ = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCamelCase )
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            snake_case__ = model(lowerCamelCase )
            self.assertTrue(outputs_dict is not None )
def prepare_img():
    """Load the standard COCO test image used by the integration tests below.

    Renamed from the corrupted ``SCREAMING_SNAKE_CASE__``: the integration
    tests call it as ``prepare_img``, and its local was assigned to a
    throwaway name but returned as ``image``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class TFEfficientFormerModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end checks against the published efficientformer-l1-300.

    NOTE(review): reconstructed from a corrupted block where both test
    methods were named ``A_`` (the second silently shadowed the first), the
    cached property was named ``A_`` but read as ``default_image_processor``,
    and locals were replaced by undefined placeholder names.
    """

    @cached_property
    def default_image_processor(self):
        # Only available when vision extras are installed.
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 276
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    """Builds Roberta configs and dummy inputs for the Flax model tests.

    NOTE(review): reconstructed from a corrupted block (duplicate
    ``lowerCamelCase`` parameters, attributes never bound to ``self``).  The
    defaults below map one-to-one, in order, onto the corrupted signature,
    and the class is renamed to ``FlaxRobertaModelTester`` because the test
    class below instantiates it under that name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Adapter used by the shared mixin tests: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Decoder variant: is_decoder=True plus encoder hidden states/mask."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the shared Flax model-tester suite against the Roberta models.

    NOTE(review): the corrupted base class ``__UpperCamelCase`` is restored
    to ``FlaxModelTesterMixin`` (the only mixin imported above).
    """

    # Presumably ``test_head_masking`` in the original — TODO confirm which
    # common-suite flag this boolean was.
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Smoke-load every model class from the PyTorch checkpoint and run a
        # trivial forward pass.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 368
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase , unittest.TestCase ):
    """Tokenizer tests for Longformer (slow and fast implementations).

    NOTE(review): this block is machine-corrupted and will not run as-is.
    The base class was renamed to the undefined ``__UpperCamelCase``
    (presumably ``TokenizerTesterMixin`` imported above); every method is
    named ``UpperCamelCase``, so later definitions shadow earlier ones; and
    assignment targets were renamed to ``_snake_case`` while later
    references keep the original local names (``tokenizer``, ``encoded``,
    ``tokens_r`` …).  Restore identifiers from the upstream transformers
    test file before use.
    """
    # Presumably tokenizer_class / test_slow_tokenizer /
    # rust_tokenizer_class / test_rust_tokenizer in the original.
    UpperCAmelCase__ : List[Any] = LongformerTokenizer
    UpperCAmelCase__ : Dict = True
    UpperCAmelCase__ : Union[str, Any] = LongformerTokenizerFast
    UpperCAmelCase__ : List[str] = True
    # setUp: write a tiny BPE vocab + merges file pair into tmpdirname.
    def UpperCamelCase( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        _snake_case = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        _snake_case = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
        _snake_case = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        _snake_case = {"unk_token": "<unk>"}
        _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowerCamelCase ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(lowerCamelCase ) )
    # get_tokenizer: build the slow tokenizer from the temp files.
    def UpperCamelCase( self , **lowerCamelCase ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase )
    # get_rust_tokenizer: build the fast tokenizer from the temp files.
    def UpperCamelCase( self , **lowerCamelCase ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase )
    # get_input_output_texts: identity pair for round-trip checks.
    def UpperCamelCase( self , lowerCamelCase ):
        _snake_case = "lower newer"
        _snake_case = "lower newer"
        return input_text, output_text
    # test_full_tokenizer: tokenize + convert_tokens_to_ids on the toy vocab.
    def UpperCamelCase( self ):
        _snake_case = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        _snake_case = "lower newer"
        _snake_case = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        _snake_case = tokenizer.tokenize(lowerCamelCase )  # , add_prefix_space=True)
        self.assertListEqual(lowerCamelCase , lowerCamelCase )
        _snake_case = tokens + [tokenizer.unk_token]
        _snake_case = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase )
    # Hard-coded encode checks against known Longformer ids.
    def UpperCamelCase( self ):
        _snake_case = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCamelCase ) , [0, 31_414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowerCamelCase ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
    # test_sequence_builders: build_inputs_with_special_tokens round-trip.
    @slow
    def UpperCamelCase( self ):
        _snake_case = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
        _snake_case = tokenizer.encode("sequence builders" , add_special_tokens=lowerCamelCase )
        _snake_case = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCamelCase )
        _snake_case = tokenizer.encode(
            "sequence builders" , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
        _snake_case = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
        _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
        _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCamelCase , lowerCamelCase )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    # Space / special-token handling: prefix-space and <mask> lstrip rules.
    def UpperCamelCase( self ):
        _snake_case = self.get_tokenizer()
        _snake_case = "Encode this sequence."
        _snake_case = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
        # Testing encoder arguments
        _snake_case = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
        _snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(lowerCamelCase , lowerCamelCase )
        _snake_case = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase )
        _snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(lowerCamelCase , lowerCamelCase )
        tokenizer.add_special_tokens({"bos_token": "<s>"} )
        _snake_case = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
        _snake_case = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(lowerCamelCase , lowerCamelCase )
        # Testing spaces after special tokens
        _snake_case = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase )} )  # mask token has a left space
        _snake_case = tokenizer.convert_tokens_to_ids(lowerCamelCase )
        _snake_case = "Encode <mask> sequence"
        _snake_case = "Encode <mask>sequence"
        _snake_case = tokenizer.encode(lowerCamelCase )
        _snake_case = encoded.index(lowerCamelCase )
        _snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(lowerCamelCase , lowerCamelCase )
        _snake_case = tokenizer.encode(lowerCamelCase )
        _snake_case = encoded.index(lowerCamelCase )
        _snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(lowerCamelCase , lowerCamelCase )
    # Intentionally skipped upstream (pretokenized inputs not supported).
    def UpperCamelCase( self ):
        pass
    # Fast vs slow tokenizers must agree on special-token handling.
    def UpperCamelCase( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
                _snake_case = self.tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
                _snake_case = "A, <mask> AllenNLP sentence."
                _snake_case = tokenizer_r.encode_plus(lowerCamelCase , add_special_tokens=lowerCamelCase , return_token_type_ids=lowerCamelCase )
                _snake_case = tokenizer_p.encode_plus(lowerCamelCase , add_special_tokens=lowerCamelCase , return_token_type_ids=lowerCamelCase )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                _snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                _snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
    # Serialized pre_tokenizer / post_processor state must reflect the
    # add_prefix_space / trim_offsets constructor arguments.
    def UpperCamelCase( self ):
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            _snake_case = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
            _snake_case = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            _snake_case = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCamelCase )
            self.assertEqual(post_processor_state["add_prefix_space"] , lowerCamelCase )
            self.assertEqual(post_processor_state["trim_offsets"] , lowerCamelCase )
    # Offset-mapping matrix over the add_prefix_space / trim_offsets settings.
    def UpperCamelCase( self ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _snake_case = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                _snake_case = F'''{text_of_1_token} {text_of_1_token}'''
                _snake_case = self.rust_tokenizer_class.from_pretrained(
                    lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
                _snake_case = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCamelCase ) + 1, len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
                _snake_case = self.rust_tokenizer_class.from_pretrained(
                    lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
                _snake_case = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCamelCase ) + 1, len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
                _snake_case = self.rust_tokenizer_class.from_pretrained(
                    lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
                _snake_case = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCamelCase ), len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
                _snake_case = self.rust_tokenizer_class.from_pretrained(
                    lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
                _snake_case = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCamelCase ), len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
                _snake_case = F''' {text}'''
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                _snake_case = self.rust_tokenizer_class.from_pretrained(
                    lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
                _snake_case = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCamelCase ) + 1, 1 + len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
                _snake_case = self.rust_tokenizer_class.from_pretrained(
                    lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
                _snake_case = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCamelCase ), 1 + len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
                _snake_case = self.rust_tokenizer_class.from_pretrained(
                    lowerCamelCase , use_fast=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase )
                _snake_case = tokenizer_r(lowerCamelCase , return_offsets_mapping=lowerCamelCase , add_special_tokens=lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCamelCase ), 1 + len(lowerCamelCase ) + 1 + len(lowerCamelCase )) , )
| 368
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the NLLB tokenizers.  The corrupted original
# assigned every value to `lowerCAmelCase__` and then passed the undefined
# `_import_structure` to `_LazyModule`, and discarded the lazy module
# instead of installing it in sys.modules.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Slow (sentencepiece-based) tokenizer.
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast (tokenizers-based) tokenizer.
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy so optional deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 645
|
"""simple docstring"""
# Stdlib imports.  "sqlitea" in the corrupted original is not a real module
# (the import would fail at collection time); the intended module is sqlite3.
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile

# Third-party imports.
import pyarrow as pa
import pyarrow.parquet as pq
import pytest

# Local package under test.
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def dataset():
    """Session-scoped toy Dataset with sequence, class-label, QA and id columns.

    Renamed from the corrupted duplicate ``a__``; locals restored so that
    ``n`` and ``features`` (referenced below) are actually defined.
    """
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string" ) ),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string" ),
                    "answer_start": datasets.Value("int32" ),
                } ),
            "id": datasets.Value("int64" ),
        } )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n ) ),
        } , features=features , )
    return dataset
@pytest.fixture(scope="session" )
def arrow_file(tmp_path_factory, dataset):
    """Materialize the toy dataset to an Arrow cache file and return its path.

    The corrupted original declared the parameter name twice (a SyntaxError);
    pytest resolves fixture arguments by name, so they must be
    ``tmp_path_factory`` and ``dataset``.
    """
    filename = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
    # map() with an explicit cache_file_name writes the table to `filename`.
    dataset.map(cache_file_name=filename )
    return filename
# FILE_CONTENT + files
# Shared payload written (plain and compressed) by the file fixtures below;
# the corrupted original assigned it to a throwaway name while every fixture
# reads `FILE_CONTENT`.
FILE_CONTENT = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="session" )
def text_file(tmp_path_factory):
    """Write FILE_CONTENT to a plain text file and return its path."""
    filename = tmp_path_factory.mktemp("data" ) / "file.txt"
    data = FILE_CONTENT
    with open(filename , "w" ) as f:
        f.write(data )
    return filename
@pytest.fixture(scope="session" )
def bz2_file(tmp_path_factory):
    """Write FILE_CONTENT bzip2-compressed and return the path."""
    # The corrupted original imported the nonexistent module "bza".
    import bz2

    path = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
    data = bytes(FILE_CONTENT , "utf-8" )
    with bz2.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="session" )
def gz_file(tmp_path_factory):
    """Write FILE_CONTENT gzip-compressed and return the path (as str)."""
    import gzip

    path = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
    data = bytes(FILE_CONTENT , "utf-8" )
    with gzip.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    """Write FILE_CONTENT to an lz4-compressed file (only if lz4 is installed)."""
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path
    # implicitly returns None when lz4 is unavailable; dependent tests must skip
@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    """Archive the text file into a .7z archive (only if py7zr is installed)."""
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path
    # implicitly returns None when py7zr is unavailable; dependent tests must skip
@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    """Archive the text file into an uncompressed tar archive and return its path."""
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    """Write FILE_CONTENT to an xz-compressed file and return its path."""
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    """Archive the text file into a zip archive and return its path."""
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    """Write FILE_CONTENT to a zstd-compressed file (only if zstandard is installed)."""
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
    # implicitly returns None when zstandard is unavailable; dependent tests must skip
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    """Write a small TMX (translation-memory XML) document and return its path."""
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        "\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
    with open(filename, "w") as f:
        f.write(data)
    return filename
# Shared tabular payloads used by the csv/json/parquet/sql fixtures below.
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
# Same rows as DATA[:2] but with the keys in a different (3-1-2) order.
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
    """Expose the column-oriented payload as a fixture."""
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    """Write DATA_DICT_OF_LISTS to an Arrow file and return its path."""
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    """Write DATA into a one-table SQLite database and return its path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    """Write DATA to a CSV file (with header) and return its path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    """Write DATA to a second CSV file (with header) and return its path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    """Compress the CSV fixture with bz2 and return the compressed file's path."""
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    """Zip both CSV fixtures at the archive root and return the archive's path."""
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path
@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    """Zip both CSV fixtures under uppercase .CSV arcnames and return the archive's path."""
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path
@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    """Zip both CSV fixtures inside a 'main_dir/' folder and return the archive's path."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    """Write DATA to a Parquet file with an explicit schema and return its path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        # transpose the row-oriented DATA into the column-oriented dict pyarrow expects
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    """Write {"data": DATA} (list-of-rows layout) to a JSON file and return its path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    """Write {"data": DATA_DICT_OF_LISTS} (columnar layout) to a JSON file and return its path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    """Write DATA as JSON Lines (one object per line) and return the file's path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    """Write DATA as a second JSON Lines file and return its path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    """Write DATA_312 (shuffled key order) as JSON Lines and return the file's path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    """Write DATA_STR (string col_1 values) as JSON Lines and return the file's path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    """Gzip the text dataset fixture and return the compressed file's path."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    """Gzip the JSON Lines fixture and return the compressed file's path."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    """Zip both JSON Lines fixtures at the archive root and return the archive's path."""
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """Zip the jsonl zip archive inside a 'nested/' folder and return the outer archive's path."""
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    """Zip both JSON Lines fixtures inside a 'main_dir/' folder and return the archive's path."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path
@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    """Tar both JSON Lines fixtures at the archive root and return the archive's path."""
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """Tar the jsonl tar archive inside a 'nested/' folder and return the outer archive's path."""
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    """Write four lines ('0'..'3') to a text file and return its path."""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    """Write four lines ('0'..'3') to a second text file and return its path."""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    """Write four lines to a file with an unrecognized '.abc' extension and return its path."""
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    """Zip both text fixtures at the archive root and return the archive's path."""
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path
@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    """Zip both text fixtures inside a 'main_dir/' folder and return the archive's path."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path
@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    """Zip the text fixtures under unsupported '.ext' arcnames and return the archive's path."""
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    """Write a text file containing a U+2029 paragraph separator and return its path."""
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def image_file():
    """Return the repo-relative path of the checked-in RGB test image."""
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")
@pytest.fixture(scope="session")
def audio_file():
    """Return the repo-relative path of the checked-in 44.1 kHz test WAV file."""
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")
@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    """Zip the test image twice (second copy renamed '*2.jpg') and return the archive's path."""
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    """Build a data directory containing visible plus hidden files/dirs (to test glob filtering)."""
    data_dir = tmp_path_factory.mktemp("data_dir")
    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    return data_dir
| 645
| 1
|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    """Builds a tiny NezhaConfig plus random inputs and runs shape checks for every Nezha head.

    The original chunk was mangled: the class and all methods shared one name and every
    parameter was the same identifier (a SyntaxError); names are restored here and the
    broken single-name tuple assignments are replaced with real tuple unpacking.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, ids, type ids, mask, and the three kinds of labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs but with decoder mode and encoder tensors."""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        # exercise the three legal calling conventions
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # expand (batch, seq) inputs to (batch, num_choices, seq)
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite + per-head tests for the Nezha model family.

    The mangled base-class placeholders (`_A`) are restored to the mixins imported at the
    top of the file, and the broken single-name tuple assignments are replaced with real
    tuple unpacking.
    """

    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy pretraining labels when the common suite requests labeled inputs."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class snake_case_ ( unittest.TestCase):
    """Slow integration tests running the pretrained NeZha checkpoint."""

    # NOTE(review): both methods were originally named `__lowercase`, so the
    # first shadowed the second and neither was discovered by unittest;
    # restored distinct `test_*` names.
    @slow
    def test_inference_nezha_model(self):
        """Check hidden-state outputs of the base model against known values."""
        model = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''')
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        """Check masked-LM logits against known values."""
        model = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''')
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 710
|
import string
import numpy
def A__(a: int, b: int) -> int:
    """Return the greatest common divisor of *a* and *b* (Euclidean algorithm).

    BUGFIX: the obfuscated signature declared the same parameter name twice
    (a SyntaxError) and the recursion called the undefined name
    `greatest_common_divisor`.
    """
    return b if a == 0 else A__(b % a, a)


# Backward-compatible alias: other code in this file calls the descriptive name.
greatest_common_divisor = A__
class snake_case_ :
lowerCamelCase :Tuple = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
lowerCamelCase :str = numpy.vectorize(lambda _A: x % 36)
lowerCamelCase :str = numpy.vectorize(_A)
def __init__( self , __lowercase ) -> None:
lowerCamelCase : Union[str, Any] =self.modulus(__lowercase ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
lowerCamelCase : int =encrypt_key.shape[0]
def __lowercase ( self , __lowercase ) -> int:
return self.key_string.index(__lowercase )
def __lowercase ( self , __lowercase ) -> str:
return self.key_string[round(__lowercase )]
def __lowercase ( self ) -> None:
lowerCamelCase : Tuple =round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowerCamelCase : Dict =det % len(self.key_string )
lowerCamelCase : Dict =len(self.key_string )
if greatest_common_divisor(__lowercase , len(self.key_string ) ) != 1:
lowerCamelCase : Union[str, Any] =(
F"determinant modular {req_l} of encryption key({det}) "
F"is not co prime w.r.t {req_l}.\nTry another key."
)
raise ValueError(__lowercase )
def __lowercase ( self , __lowercase ) -> str:
lowerCamelCase : int =[char for char in text.upper() if char in self.key_string]
lowerCamelCase : Tuple =chars[-1]
while len(__lowercase ) % self.break_key != 0:
chars.append(__lowercase )
return "".join(__lowercase )
def __lowercase ( self , __lowercase ) -> str:
lowerCamelCase : str =self.process_text(text.upper() )
lowerCamelCase : Optional[int] =''''''
for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ):
lowerCamelCase : Union[str, Any] =text[i : i + self.break_key]
lowerCamelCase : Optional[int] =[self.replace_letters(__lowercase ) for char in batch]
lowerCamelCase : int =numpy.array([vec] ).T
lowerCamelCase : Optional[Any] =self.modulus(self.encrypt_key.dot(__lowercase ) ).T.tolist()[
0
]
lowerCamelCase : int =''''''.join(
self.replace_digits(__lowercase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def __lowercase ( self ) -> numpy.ndarray:
lowerCamelCase : List[Any] =round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowerCamelCase : int =det % len(self.key_string )
lowerCamelCase : Any =None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
lowerCamelCase : int =i
break
lowerCamelCase : Union[str, Any] =(
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(__lowercase ) )
def __lowercase ( self , __lowercase ) -> str:
lowerCamelCase : Optional[int] =self.make_decrypt_key()
lowerCamelCase : int =self.process_text(text.upper() )
lowerCamelCase : Any =''''''
for i in range(0 , len(__lowercase ) - self.break_key + 1 , self.break_key ):
lowerCamelCase : int =text[i : i + self.break_key]
lowerCamelCase : str =[self.replace_letters(__lowercase ) for char in batch]
lowerCamelCase : Optional[int] =numpy.array([vec] ).T
lowerCamelCase : Optional[int] =self.modulus(decrypt_key.dot(__lowercase ) ).T.tolist()[0]
lowerCamelCase : Tuple =''''''.join(
self.replace_digits(__lowercase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def main() -> None:
    """Interactive driver: build a Hill cipher from user input, then encrypt
    or decrypt text.

    BUGFIX: the obfuscated body read input into throwaway names and then used
    the undefined `SCREAMING_SNAKE_CASE_`; all locals are restored.
    """
    n = int(input('''Enter the order of the encryption key: '''))
    hill_matrix = []
    print('''Enter each row of the encryption key with space separated integers''')
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print('''Would you like to encrypt or decrypt some text? (1 or 2)''')
    option = input('''\n1. Encrypt\n2. Decrypt\n''')
    if option == "1":
        text_e = input('''What text would you like to encrypt?: ''')
        print('''Your encrypted text is:''')
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input('''What text would you like to decrypt?: ''')
        print('''Your decrypted text is:''')
        print(hc.decrypt(text_d))


# Backward-compatible alias for the obfuscated name.
A__ = main

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 262
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a__(lowerCAmelCase) -> Optional[int]:
    """argparse factory: build the download command from parsed CLI args.

    BUGFIX: the body referenced the undefined names `args` (the parameter is
    `lowerCAmelCase`) and `DownloadCommand` (the class is obfuscated to
    `lowerCamelCase` below).
    """
    args = lowerCAmelCase
    return lowerCamelCase(args.model, args.cache_dir, args.force, args.trust_remote_code)
class lowerCamelCase ( BaseTransformersCLICommand ):
    """CLI command that pre-downloads a model and its tokenizer into the cache.

    BUGFIX: the base class was the undefined `_snake_case` (the import above is
    `BaseTransformersCLICommand`); `__init__` declared the same parameter name
    four times (SyntaxError); both methods were named `_a`, so the static
    registrar was shadowed by the runner; `_UpperCamelCase` was undefined.
    """

    @staticmethod
    def register_subcommand(_lowerCamelCase):
        """Register the `download` sub-command on the given subparsers object."""
        parser = _lowerCamelCase
        download_parser = parser.add_parser("""download""")
        download_parser.add_argument(
            """--cache-dir""", type=str, default=None, help="""Path to location to store the models""")
        download_parser.add_argument(
            """--force""", action="""store_true""", help="""Force the model to be download even if already in cache-dir""")
        download_parser.add_argument(
            """--trust-remote-code""", action="""store_true""", help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine""", )
        download_parser.add_argument("""model""", type=str, help="""Name of the model to download""")
        # `a__` is the module-level factory defined above this class.
        download_parser.set_defaults(func=a__)

    def __init__(self, model, cache, force, trust_remote_code):
        """Store the parsed CLI options for `run`."""
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        """Download the model weights and tokenizer into the local cache."""
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
| 182
|
__UpperCAmelCase = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 406
| 0
|
def UpperCAmelCase(number: int, position: int) -> int:
    """Return *number* with the bit at *position* set to 1.

    BUGFIX: the obfuscated signature declared parameter `A` twice (SyntaxError)
    while the body used `number`/`position`.
    """
    return number | (1 << position)
def UpperCAmelCase(number: int, position: int) -> int:
    """Return *number* with the bit at *position* cleared to 0.

    BUGFIX: the obfuscated signature declared parameter `A` twice (SyntaxError)
    while the body used `number`/`position`.
    """
    return number & ~(1 << position)
def UpperCAmelCase(number: int, position: int) -> int:
    """Return *number* with the bit at *position* flipped.

    BUGFIX: the obfuscated signature declared parameter `A` twice (SyntaxError)
    while the body used `number`/`position`.
    """
    return number ^ (1 << position)
def UpperCAmelCase(number: int, position: int) -> bool:
    """Return True if the bit at *position* of *number* is set.

    BUGFIX: the obfuscated signature declared parameter `A` twice (SyntaxError)
    while the body used `number`/`position`.
    """
    return ((number >> position) & 1) == 1
def UpperCAmelCase(number: int, position: int) -> int:
    """Return the bit (0 or 1) at *position* of *number*.

    BUGFIX: the obfuscated signature declared parameter `A` twice (SyntaxError)
    while the body used `number`/`position`.
    """
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 719
|
"""simple docstring"""
import os
def UpperCAmelCase():
    """Project Euler 13: first ten digits of the sum of the numbers in num.txt.

    BUGFIX: the obfuscated body referenced the undefined name `A` both where
    `__file__` and where the loop variable `line` belong.
    """
    file_path = os.path.join(os.path.dirname(__file__), 'num.txt')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
    # BUGFIX: this guard called the undefined name `solution`; the solution
    # function above is (obfuscated to) `UpperCAmelCase` in this file.
    print(UpperCAmelCase())
| 24
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE__ = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    """Round (height, width) up to the nearest multiple of scale_factor**2 and
    return them divided down to latent resolution times scale_factor.

    BUGFIX: the obfuscated signature declared `SCREAMING_SNAKE_CASE` twice
    (SyntaxError) and the body incremented the undefined `new_height`/`new_width`.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


# Backward-compatible alias for the obfuscated name.
UpperCAmelCase__ = downscale_height_and_width
def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image to (w, h) and convert it to a [-1, 1] CHW torch tensor
    with a leading batch dimension.

    BUGFIX: the obfuscated signature declared `SCREAMING_SNAKE_CASE` twice
    (SyntaxError) and the body used the undefined `pil_image`/`w`/`h`; the call
    site in the pipeline below uses the name `prepare_image`.
    """
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("""RGB"""))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image


# Backward-compatible alias for the obfuscated name.
UpperCAmelCase__ = prepare_image
class lowercase ( DiffusionPipeline ):
    """Kandinsky 2.2 image-to-image decoder pipeline.

    Denoises MoVQ latents of an input image with a UNet conditioned on CLIP
    image embeddings produced by the Kandinsky prior pipeline.

    BUGFIX: the base class was the undefined `SCREAMING_SNAKE_CASE_` (the file
    imports `DiffusionPipeline`); several methods declared duplicate parameter
    names (SyntaxError); every method was named `_snake_case` even though the
    call sites in `__call__` use `get_timesteps`, `prepare_latents`,
    `movq_scale_factor` etc.; all names are restored from the call sites.
    """

    def __init__(self, unet, scheduler, movq):
        """Register (unet, scheduler, movq) as pipeline modules."""
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        # Spatial scale between pixel space and MoVQ latent space.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        """Return the truncated timestep schedule for img2img at `strength`."""
        # Skip the first (1 - strength) fraction of the schedule: denoising
        # starts from a partially-noised version of the input image.
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        """Encode `image` to MoVQ latents and add scheduler noise for `timestep`."""
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}' )
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            # The input is already a latent tensor.
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                    f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload submodules to CPU, moving each to GPU only while it runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""")
        device = torch.device(f'cuda:{gpu_id}')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models to CPU with hooks (faster than sequential)."""
        if is_accelerate_available() and is_accelerate_version(""">=""", """0.17.0.dev0"""):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""")
        device = torch.device(f'cuda:{gpu_id}')
        if self.device.type != "cpu":
            self.to("""cpu""", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device the pipeline actually executes on (accounts for offload hooks)."""
        if not hasattr(self.unet, """_hf_hook"""):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, """_hf_hook""")
                and hasattr(module._hf_hook, """execution_device""")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(SCREAMING_SNAKE_CASE__)
    def __call__( self, image_embeds, image, negative_image_embeds, height = 512, width = 512, num_inference_steps = 100, guidance_scale = 4.0, strength = 0.3, num_images_per_prompt = 1, generator = None, output_type = "pil", return_dict = True, ):
        """Run img2img generation; see the module-level example docstring."""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f'Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor' )
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)["""latents"""]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"""image_embeds""": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, """variance_type""")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 532
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

# NOTE(review): every constant below was bound to the single obfuscated name
# `a__`, so each assignment shadowed the previous one and the descriptive
# names referenced by the tokenizer class further down were undefined.
# Restored the conventional names; the final `a__` binding is preserved below.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Checkpoint name -> hosted vocab/tokenizer file URLs.
# NOTE(review): a few URLs look garbled ("/aresolve/", "tokenizer.jsont") —
# kept byte-for-byte; verify against the hub before relying on them.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

# Default tokenizer kwargs per checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}

# Preserve the obfuscated name's final binding for backward compatibility.
a__ = PRETRAINED_INIT_CONFIGURATION
class snake_case ( SCREAMING_SNAKE_CASE_ ):
    """Fast (Rust-backed) tokenizer wrapper whose slow counterpart is ``RealmTokenizer``.

    NOTE(review): mechanical renaming has broken this class.  The ``__init__``
    signature repeats the parameter name ``lowerCAmelCase`` many times, which is a
    SyntaxError in Python; bodies read identifiers (``normalizer_state``,
    ``batch_text_pair``, ``output_data``, ``encoded_candidates``, ``token_ids_a``)
    whose assignments were renamed to ``_snake_case``; and all four public methods
    share the name ``UpperCamelCase_``, so only the last ``def`` (vocabulary saving)
    would survive on the class.  Restore the original identifiers before use.
    """
    # Class-level tokenizer configuration tables (defined earlier in this file).
    # NOTE(review): all five assignments target the same name ``snake_case_``,
    # so only the last one (the slow-tokenizer class) survives.
    snake_case_ : int = VOCAB_FILES_NAMES
    snake_case_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    snake_case_ : Any = PRETRAINED_INIT_CONFIGURATION
    snake_case_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case_ : Dict = RealmTokenizer
    def __init__( self : int , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Union[str, Any]="[UNK]" , lowerCAmelCase : List[str]="[SEP]" , lowerCAmelCase : Optional[int]="[PAD]" , lowerCAmelCase : List[Any]="[CLS]" , lowerCAmelCase : Any="[MASK]" , lowerCAmelCase : Dict=True , lowerCAmelCase : int=None , **lowerCAmelCase : Tuple , ) -> List[str]:
        """Build the fast tokenizer and re-sync the backend normalizer options.

        NOTE(review): the duplicated parameter name above is a SyntaxError; the
        original signature had distinct names (vocab_file, tokenizer_file,
        do_lower_case, unk/sep/pad/cls/mask tokens, tokenize_chinese_chars,
        strip_accents) — presumably; confirm against the slow tokenizer.
        """
        super().__init__(
            lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
        # Serialized state of the Rust normalizer; compared against the
        # requested options and rebuilt if they disagree.
        _snake_case : str = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            # NOTE(review): ``normalizer_state`` is read here but the json.loads
            # result above was assigned to ``_snake_case``.
            normalizer_state.get("""lowercase""" , lowerCAmelCase) != do_lower_case
            or normalizer_state.get("""strip_accents""" , lowerCAmelCase) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase) != tokenize_chinese_chars
        ):
            # Rebuild the normalizer class with the requested options.
            _snake_case : Tuple = getattr(lowerCAmelCase , normalizer_state.pop("""type"""))
            _snake_case : Any = do_lower_case
            _snake_case : Optional[int] = strip_accents
            _snake_case : str = tokenize_chinese_chars
            _snake_case : List[str] = normalizer_class(**lowerCAmelCase)
        _snake_case : int = do_lower_case
    def UpperCamelCase_ ( self : Dict , lowerCAmelCase : List[str] , **lowerCAmelCase : Tuple) -> Union[str, Any]:
        """Batch-encode candidate texts (one call per candidate list entry),
        padding each to the model max length, and collect the pieces into a
        single ``BatchEncoding``.

        NOTE(review): ``batch_text_pair``, ``output_data``, ``encoded_candidates``
        and the ``encoded_*`` locals below are read but their assignments were
        renamed to ``_snake_case``.
        """
        _snake_case : Dict = PaddingStrategy.MAX_LENGTH
        _snake_case : Any = text
        _snake_case : List[Any] = kwargs.pop("""text_pair""" , lowerCAmelCase)
        _snake_case : Union[str, Any] = kwargs.pop("""return_tensors""" , lowerCAmelCase)
        _snake_case : Optional[Any] = {
            """input_ids""": [],
            """attention_mask""": [],
            """token_type_ids""": [],
        }
        for idx, candidate_text in enumerate(lowerCAmelCase):
            if batch_text_pair is not None:
                _snake_case : Dict = batch_text_pair[idx]
            else:
                _snake_case : List[str] = None
            _snake_case : Optional[int] = super().__call__(lowerCAmelCase , lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase)
            _snake_case : str = encoded_candidates.get("""input_ids""")
            _snake_case : Union[str, Any] = encoded_candidates.get("""attention_mask""")
            _snake_case : Any = encoded_candidates.get("""token_type_ids""")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(lowerCAmelCase)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(lowerCAmelCase)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(lowerCAmelCase)
        # Drop any field that stayed empty before wrapping in BatchEncoding.
        _snake_case : str = {key: item for key, item in output_data.items() if len(lowerCAmelCase) != 0}
        return BatchEncoding(lowerCAmelCase , tensor_type=lowerCAmelCase)
    def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any]=None) -> List[str]:
        """Build model inputs with special tokens: [CLS] A [SEP] (+ B [SEP]).

        NOTE(review): ``token_ids_a`` is read but the parameters are named
        ``lowerCAmelCase`` — both sequence arguments were renamed away.
        """
        _snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def UpperCamelCase_ ( self : Dict , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
        """Create token-type ids: 0 for the first segment (with CLS/SEP),
        1 for the optional second segment (with its SEP)."""
        _snake_case : List[Any] = [self.sep_token_id]
        _snake_case : Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
    def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
        """Save the backend vocabulary files into the given directory and
        return the written file paths."""
        _snake_case : Dict = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase)
        return tuple(lowerCAmelCase)
| 477
| 0
|
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def UpperCamelCase_ ( A__ ):
    """Delete fairseq-only entries from the state dict ``A__`` in place.

    These keys (version markers, the tied output projection, float-tensor
    placeholders) have no counterpart in the Hugging Face XGLM model and must
    be removed before loading the remaining weights.

    Fixes: the key list was assigned to a throwaway name while the loop read
    an undefined ``ignore_keys``, and ``pop`` was called on an undefined
    ``state_dict`` with nonsense arguments.
    """
    ignore_keys = [
        """decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        # Default of None so checkpoints missing a key do not raise.
        A__.pop(k, None)
def UpperCamelCase_ ( A__ ):
    """Build a bias-free ``nn.Linear`` that shares its weight tensor with the
    embedding module ``A__`` (used as the tied LM head / output projection).

    Fixes: the weight shape was unpacked into the same name twice (losing the
    vocabulary size), and ``nn.Linear`` was constructed with the embedding
    module itself as every argument.
    """
    vocab_size, emb_size = A__.weight.shape
    # bias=False: fairseq's tied output projection has no bias term.
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share (not copy) the embedding weights so the head stays tied.
    lin_layer.weight.data = A__.weight.data
    return lin_layer
def UpperCamelCase_ ( A__ ):
    """Load a fairseq XGLM checkpoint from the local path ``A__`` and rebuild
    it as a Hugging Face ``XGLMForCausalLM``.

    NOTE(review): mechanical renaming broke this body — ``checkpoint``,
    ``args``, ``state_dict`` and the other locals are read below, but every
    assignment targets the throwaway name ``a_``; and the helpers
    ``remove_ignore_keys_`` / ``make_linear_from_emb`` are not defined under
    those names in this file (both were renamed to ``UpperCamelCase_``).
    Restore the original identifiers before use.
    """
    # Load on CPU so conversion does not require a GPU.
    a_ = torch.load(A__ , map_location="""cpu""" )
    # fairseq stores its training config under checkpoint["cfg"]["model"].
    a_ = Namespace(**checkpoint["""cfg"""]["""model"""] )
    a_ = checkpoint["""model"""]
    remove_ignore_keys_(A__ )
    a_ = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    # fairseq prefixes weights with "decoder"; HF XGLM uses "model".
    a_ = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()}
    a_ = XGLMConfig(
        vocab_size=A__ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    a_ = XGLMForCausalLM(A__ )
    # strict=False-style load result is printed so missing/unexpected keys are visible.
    a_ = model.load_state_dict(A__ , strict=A__ )
    print(A__ )
    # Re-tie the LM head to the input embeddings.
    a_ = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
    # CLI entry point: convert a fairseq XGLM checkpoint into a HF model dir.
    # NOTE(review): renaming broke this block — the parser/args/model objects
    # are read via ``parser``/``args``/``model`` but assigned to ``lowercase__``,
    # and ``convert_fairseq_xglm_checkpoint_from_disk`` is not defined in this
    # file (the converter above is named ``UpperCamelCase_``).
    lowercase__ =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    lowercase__ =parser.parse_args()
    lowercase__ =convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 701
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Select the tensor framework tag used by the tests below, preferring
# torch, then tensorflow, falling back to jax.
# NOTE(review): later code reads ``FRAMEWORK`` (see the `if FRAMEWORK != "jax"`
# check), but renaming changed these assignment targets to ``lowercase__``.
if is_torch_available():
    lowercase__ ='pt'
elif is_tf_available():
    lowercase__ ='tf'
else:
    lowercase__ ='jax'
class a_ ( UpperCamelCase__ , unittest.TestCase ):
    """Test-suite for the byte-level ``PerceiverTokenizer``.

    NOTE(review): mechanical renaming damaged this class: every test method is
    named ``lowerCAmelCase__`` (so only the last ``def`` survives on the
    class), both class attributes share the name ``lowerCamelCase__`` (the
    second assignment overwrites the first), one signature repeats the
    parameter ``UpperCAmelCase`` four times (a SyntaxError), and bodies read
    identifiers (``tokenizer``, ``toks``, ``output_txt``,
    ``self.perceiver_tokenizer`` ...) whose assignments were renamed to
    ``a_``.  Restore the original identifiers before relying on these tests.
    """
    lowerCamelCase__ : int = PerceiverTokenizer
    lowerCamelCase__ : Optional[int] = False
    def lowerCAmelCase__ ( self ):
        # setUp: persist a fresh tokenizer into the mixin's temp directory.
        super().setUp()
        a_ = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def lowerCAmelCase__ ( self ):
        # Hub tokenizer used by the integration-style tests below
        # (read elsewhere as ``self.perceiver_tokenizer``).
        return PerceiverTokenizer.from_pretrained("""deepmind/language-perceiver""" )
    def lowerCAmelCase__ ( self , **UpperCAmelCase ):
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase )
    def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=20 , UpperCAmelCase=5 ):
        # NOTE(review): the duplicated parameter names above are a SyntaxError.
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        a_ = []
        for i in range(len(UpperCAmelCase ) ):
            try:
                a_ = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCAmelCase )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        a_ = list(filter(lambda UpperCAmelCase : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , UpperCAmelCase ) )
        a_ = list(filter(lambda UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCAmelCase ) , UpperCAmelCase ) )
        if max_length is not None and len(UpperCAmelCase ) > max_length:
            a_ = toks[:max_length]
        if min_length is not None and len(UpperCAmelCase ) < min_length and len(UpperCAmelCase ) > 0:
            while len(UpperCAmelCase ) < min_length:
                a_ = toks + toks
        # toks_str = [t[1] for t in toks]
        a_ = [t[0] for t in toks]
        # Ensure consistency
        a_ = tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase )
        if " " not in output_txt and len(UpperCAmelCase ) > 1:
            a_ = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCAmelCase )
                + """ """
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCAmelCase )
            )
        if with_prefix_space:
            a_ = """ """ + output_txt
        a_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
        return output_txt, output_ids
    def lowerCAmelCase__ ( self ):
        # Round-trip a unicode string through __call__/decode/encode.
        a_ = self.perceiver_tokenizer
        a_ = """Unicode €."""
        a_ = tokenizer(UpperCAmelCase )
        a_ = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5]
        self.assertEqual(encoded["""input_ids"""] , UpperCAmelCase )
        # decoding
        a_ = tokenizer.decode(UpperCAmelCase )
        self.assertEqual(UpperCAmelCase , """[CLS]Unicode €.[SEP]""" )
        a_ = tokenizer("""e è é ê ë""" )
        a_ = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5]
        self.assertEqual(encoded["""input_ids"""] , UpperCAmelCase )
        # decoding
        a_ = tokenizer.decode(UpperCAmelCase )
        self.assertEqual(UpperCAmelCase , """[CLS]e è é ê ë[SEP]""" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """[CLS]e è é ê ë[SEP]""" )
    def lowerCAmelCase__ ( self ):
        # Batched padding should produce fixed-shape input_ids/attention_mask.
        a_ = self.perceiver_tokenizer
        a_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        # fmt: off
        a_ = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0]
        # fmt: on
        a_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
        self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
        if FRAMEWORK != "jax":
            a_ = list(batch.input_ids.numpy()[0] )
        else:
            a_ = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
        self.assertEqual((2, 38) , batch.input_ids.shape )
        self.assertEqual((2, 38) , batch.attention_mask.shape )
    def lowerCAmelCase__ ( self ):
        # An encoder-only tokenizer must not emit decoder_* fields.
        a_ = self.perceiver_tokenizer
        a_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        a_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("""input_ids""" , UpperCAmelCase )
        self.assertIn("""attention_mask""" , UpperCAmelCase )
        self.assertNotIn("""decoder_input_ids""" , UpperCAmelCase )
        self.assertNotIn("""decoder_attention_mask""" , UpperCAmelCase )
    def lowerCAmelCase__ ( self ):
        # text_target with max_length padding yields fixed-width targets.
        a_ = self.perceiver_tokenizer
        a_ = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        a_ = tokenizer(
            text_target=UpperCAmelCase , max_length=32 , padding="""max_length""" , truncation=UpperCAmelCase , return_tensors=UpperCAmelCase )
        self.assertEqual(32 , targets["""input_ids"""].shape[1] )
    def lowerCAmelCase__ ( self ):
        # Saving/reloading must preserve encodings, added tokens and
        # model_max_length overrides.
        # safety check on max_len default value so we are sure the test works
        a_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        a_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                a_ = tempfile.mkdtemp()
                a_ = """ He is very happy, UNwant\u00E9d,running"""
                a_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
                tokenizer.save_pretrained(UpperCAmelCase )
                a_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
                a_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
                self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
                shutil.rmtree(UpperCAmelCase )
        a_ = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                a_ = tempfile.mkdtemp()
                a_ = """ He is very happy, UNwant\u00E9d,running"""
                tokenizer.add_tokens(["""bim""", """bambam"""] )
                a_ = tokenizer.additional_special_tokens
                additional_special_tokens.append("""new_additional_special_token""" )
                tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
                a_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
                tokenizer.save_pretrained(UpperCAmelCase )
                a_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
                a_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
                self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
                self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                a_ = tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(UpperCAmelCase )
    def lowerCAmelCase__ ( self ):
        # from_pretrained must honour additional_special_tokens coming from the
        # on-disk config files and from the constructor override.
        a_ = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(UpperCAmelCase )
                with open(os.path.join(UpperCAmelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
                    a_ = json.load(UpperCAmelCase )
                with open(os.path.join(UpperCAmelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
                    a_ = json.load(UpperCAmelCase )
                a_ = [f'''<extra_id_{i}>''' for i in range(1_25 )]
                a_ = added_tokens_extra_ids + [
                    """an_additional_special_token"""
                ]
                a_ = added_tokens_extra_ids + [
                    """an_additional_special_token"""
                ]
                with open(os.path.join(UpperCAmelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(UpperCAmelCase , UpperCAmelCase )
                with open(os.path.join(UpperCAmelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(UpperCAmelCase , UpperCAmelCase )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                a_ = tokenizer_class.from_pretrained(
                    UpperCAmelCase , )
                self.assertIn(
                    """an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                a_ = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=UpperCAmelCase )]
                a_ = tokenizer_class.from_pretrained(
                    UpperCAmelCase , additional_special_tokens=UpperCAmelCase , )
                self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
    def lowerCAmelCase__ ( self ):
        # Decoding a lone non-UTF-8 byte yields the replacement character.
        a_ = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([1_78] ) , """�""" )
    def lowerCAmelCase__ ( self ):
        pass
    def lowerCAmelCase__ ( self ):
        pass
    def lowerCAmelCase__ ( self ):
        pass
    def lowerCAmelCase__ ( self ):
        pass
    def lowerCAmelCase__ ( self ):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        a_ = self.get_tokenizers(fast=UpperCAmelCase , do_lower_case=UpperCAmelCase )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                a_ = ["""[CLS]""", """t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """s""", """t""", """[SEP]"""]
                a_ = tokenizer.convert_tokens_to_string(UpperCAmelCase )
                self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
| 511
| 0
|
# Conversion factors to joules for each supported energy unit.
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.6_02_17_66_34e-19,
    "britishthermalunit_it": 1_055.05_585,
    "footpound": 1.35_58_18,
}
# Backward-compatible alias: the module previously exposed the table as ``a__``.
a__: dict[str, float] = ENERGY_CONVERSION


def UpperCamelCase__(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` from the energy unit ``from_type`` to ``to_type``.

    Both unit names must be keys of ``ENERGY_CONVERSION`` (factors to joules).

    Raises:
        ValueError: if either unit name is not a known energy unit.

    Fixes: the original signature repeated one parameter name three times
    (a SyntaxError), referenced the table under a name it was not defined as,
    joined the valid-unit list over the function object itself, and raised
    ``ValueError`` with the function instead of the built message.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 190
|
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """PyTorch-side tests for ``SamProcessor`` (save/load round-trips and
    ``post_process_masks``).

    NOTE(review): mechanical renaming damaged this class — every test method
    is named ``UpperCamelCase`` (only the last ``def`` survives on the class)
    and bodies read names (``processor``, ``masks`` ...) whose assignments
    were renamed to ``A__``.  ``np.uinta`` is presumably a mangled
    ``np.uint8`` — confirm against the upstream test.
    """
    def UpperCamelCase ( self ):
        # setUp: save a default processor into a temp dir.
        A__ = tempfile.mkdtemp()
        A__ = SamImageProcessor()
        A__ = SamProcessor(__lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )
    def UpperCamelCase ( self,**__lowerCamelCase ):
        return AutoProcessor.from_pretrained(self.tmpdirname,**__lowerCamelCase ).image_processor
    def UpperCamelCase ( self ):
        # tearDown: remove the temp dir.
        shutil.rmtree(self.tmpdirname )
    def UpperCamelCase ( self ):
        # One random RGB PIL image (moveaxis converts CHW -> HWC).
        A__ = [np.random.randint(255,size=(3, 30, 400),dtype=np.uinta )]
        A__ = [Image.fromarray(np.moveaxis(__lowerCamelCase,0,-1 ) ) for x in image_inputs]
        return image_inputs
    def UpperCamelCase ( self ):
        # save_pretrained / from_pretrained with kwargs must round-trip the
        # image processor configuration.
        A__ = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        A__ = self.get_image_processor(do_normalize=__lowerCamelCase,padding_value=1.0 )
        A__ = SamProcessor.from_pretrained(self.tmpdirname,do_normalize=__lowerCamelCase,padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string(),image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor,__lowerCamelCase )
    def UpperCamelCase ( self ):
        # Processor output must match the bare image processor output.
        A__ = self.get_image_processor()
        A__ = SamProcessor(image_processor=__lowerCamelCase )
        A__ = self.prepare_image_inputs()
        A__ = image_processor(__lowerCamelCase,return_tensors='''np''' )
        A__ = processor(images=__lowerCamelCase,return_tensors='''np''' )
        input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' ) # pop original_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(),input_processor[key].sum(),delta=1E-2 )
    @require_torch
    def UpperCamelCase ( self ):
        # post_process_masks must upscale to the original sizes, accept lists,
        # torch tensors and numpy arrays, and reject malformed inputs.
        A__ = self.get_image_processor()
        A__ = SamProcessor(image_processor=__lowerCamelCase )
        A__ = [torch.ones((1, 3, 5, 5) )]
        A__ = [[1764, 2646]]
        A__ = [[683, 1024]]
        A__ = processor.post_process_masks(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase )
        self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
        A__ = processor.post_process_masks(
            __lowerCamelCase,torch.tensor(__lowerCamelCase ),torch.tensor(__lowerCamelCase ) )
        self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
        # should also work with np
        A__ = [np.ones((1, 3, 5, 5) )]
        A__ = processor.post_process_masks(__lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ) )
        self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
        A__ = [[1, 0], [0, 1]]
        with self.assertRaises(__lowerCamelCase ):
            A__ = processor.post_process_masks(__lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """TensorFlow-side tests for ``SamProcessor`` — mirrors the PyTorch class
    above but exercises ``return_tensors='tf'``.

    NOTE(review): same renaming damage as the class above: every method is
    named ``UpperCamelCase`` (only the last survives) and bodies read names
    whose assignments were renamed to ``A__``.
    """
    def UpperCamelCase ( self ):
        # setUp: save a default processor into a temp dir.
        A__ = tempfile.mkdtemp()
        A__ = SamImageProcessor()
        A__ = SamProcessor(__lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )
    def UpperCamelCase ( self,**__lowerCamelCase ):
        return AutoProcessor.from_pretrained(self.tmpdirname,**__lowerCamelCase ).image_processor
    def UpperCamelCase ( self ):
        # tearDown: remove the temp dir.
        shutil.rmtree(self.tmpdirname )
    def UpperCamelCase ( self ):
        # One random RGB PIL image (moveaxis converts CHW -> HWC).
        A__ = [np.random.randint(255,size=(3, 30, 400),dtype=np.uinta )]
        A__ = [Image.fromarray(np.moveaxis(__lowerCamelCase,0,-1 ) ) for x in image_inputs]
        return image_inputs
    def UpperCamelCase ( self ):
        # save_pretrained / from_pretrained with kwargs must round-trip the
        # image processor configuration.
        A__ = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        A__ = self.get_image_processor(do_normalize=__lowerCamelCase,padding_value=1.0 )
        A__ = SamProcessor.from_pretrained(self.tmpdirname,do_normalize=__lowerCamelCase,padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string(),image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor,__lowerCamelCase )
    def UpperCamelCase ( self ):
        # Processor output must match the bare image processor output.
        A__ = self.get_image_processor()
        A__ = SamProcessor(image_processor=__lowerCamelCase )
        A__ = self.prepare_image_inputs()
        A__ = image_processor(__lowerCamelCase,return_tensors='''np''' )
        A__ = processor(images=__lowerCamelCase,return_tensors='''np''' )
        input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(),input_processor[key].sum(),delta=1E-2 )
    @require_tf
    def UpperCamelCase ( self ):
        # post_process_masks with TF tensors and numpy arrays; malformed
        # original_sizes must raise InvalidArgumentError.
        A__ = self.get_image_processor()
        A__ = SamProcessor(image_processor=__lowerCamelCase )
        A__ = [tf.ones((1, 3, 5, 5) )]
        A__ = [[1764, 2646]]
        A__ = [[683, 1024]]
        A__ = processor.post_process_masks(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,return_tensors='''tf''' )
        self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
        A__ = processor.post_process_masks(
            __lowerCamelCase,tf.convert_to_tensor(__lowerCamelCase ),tf.convert_to_tensor(__lowerCamelCase ),return_tensors='''tf''',)
        self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
        # should also work with np
        A__ = [np.ones((1, 3, 5, 5) )]
        A__ = processor.post_process_masks(
            __lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ),return_tensors='''tf''' )
        self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
        A__ = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            A__ = processor.post_process_masks(
                __lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ),return_tensors='''tf''' )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Cross-framework (PT vs TF) equivalence tests for ``SamProcessor``.

    NOTE(review): same renaming damage as the sibling classes — both test
    methods share the name ``UpperCamelCase`` and bodies read names whose
    assignments were renamed to ``A__``.  ``np.floataa`` is presumably a
    mangled ``np.float32`` — confirm against the upstream test.
    """
    def UpperCamelCase ( self ):
        # setUp: save a default processor into a temp dir.
        A__ = tempfile.mkdtemp()
        A__ = SamImageProcessor()
        A__ = SamProcessor(__lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )
    def UpperCamelCase ( self,**__lowerCamelCase ):
        return AutoProcessor.from_pretrained(self.tmpdirname,**__lowerCamelCase ).image_processor
    def UpperCamelCase ( self ):
        # tearDown: remove the temp dir.
        shutil.rmtree(self.tmpdirname )
    def UpperCamelCase ( self ):
        # One random RGB PIL image (moveaxis converts CHW -> HWC).
        A__ = [np.random.randint(255,size=(3, 30, 400),dtype=np.uinta )]
        A__ = [Image.fromarray(np.moveaxis(__lowerCamelCase,0,-1 ) ) for x in image_inputs]
        return image_inputs
    @is_pt_tf_cross_test
    def UpperCamelCase ( self ):
        # post_process_masks must give identical results for TF and PT inputs.
        A__ = self.get_image_processor()
        A__ = SamProcessor(image_processor=__lowerCamelCase )
        A__ = np.random.randint(0,2,size=(1, 3, 5, 5) ).astype(np.floataa )
        A__ = [tf.convert_to_tensor(__lowerCamelCase )]
        A__ = [torch.tensor(__lowerCamelCase )]
        A__ = [[1764, 2646]]
        A__ = [[683, 1024]]
        A__ = processor.post_process_masks(
            __lowerCamelCase,__lowerCamelCase,__lowerCamelCase,return_tensors='''tf''' )
        A__ = processor.post_process_masks(
            __lowerCamelCase,__lowerCamelCase,__lowerCamelCase,return_tensors='''pt''' )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
    @is_pt_tf_cross_test
    def UpperCamelCase ( self ):
        # Preprocessing must be numerically identical across frameworks.
        A__ = self.get_image_processor()
        A__ = SamProcessor(image_processor=__lowerCamelCase )
        A__ = self.prepare_image_inputs()
        A__ = image_processor(__lowerCamelCase,return_tensors='''pt''' )['''pixel_values'''].numpy()
        A__ = processor(images=__lowerCamelCase,return_tensors='''pt''' )['''pixel_values'''].numpy()
        A__ = image_processor(__lowerCamelCase,return_tensors='''tf''' )['''pixel_values'''].numpy()
        A__ = processor(images=__lowerCamelCase,return_tensors='''tf''' )['''pixel_values'''].numpy()
        self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) )
        self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) )
        self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) )
| 190
| 1
|
import numpy as np
def lowerCAmelCase_(f, ya, xa, h, x_end):
    """Integrate the ODE ``y' = f(x, y)`` with the classical fourth-order
    Runge-Kutta method.

    Args:
        f: right-hand side ``f(x, y)``.
        ya: initial value ``y(xa)``.
        xa: start of the integration interval.
        h: step size (positive).
        x_end: end of the integration interval.

    Returns:
        numpy array of the ``n + 1`` solution values, ``y[0] == ya``.

    Fixes: the original signature repeated one parameter name five times
    (a SyntaxError), and every intermediate slope plus the update itself was
    assigned to the same throwaway name, so the RK4 combination used wrong
    values and the result was never stored in ``y``.
    """
    # Number of steps needed to cover [xa, x_end] with step h.
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # The four classical RK4 slope evaluations.
        ka = f(x, y[k])
        kb = f(x + 0.5 * h, y[k] + 0.5 * h * ka)
        kc = f(x + 0.5 * h, y[k] + 0.5 * h * kb)
        kd = f(x + h, y[k] + h * kc)
        # Weighted average: (k1 + 2*k2 + 2*k3 + k4) / 6.
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 719
|
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( dataset , expected_features ):
    """Shared assertions for a ``Dataset`` read from parquet: fixed row and
    column counts, column names, and per-feature dtypes.

    Fixes: both parameters were named ``SCREAMING_SNAKE_CASE_`` (a
    SyntaxError) and the isinstance check compared a value against itself
    instead of against ``Dataset``.
    """
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase_ ( parquet_path , tmp_path , keep_in_memory ):
    """Reading a parquet file into a Dataset honours ``keep_in_memory``.

    Fixes: all three parameters shared one name (a SyntaxError); pytest
    injects fixtures by parameter name, so the body's reads of ``tmp_path``
    and ``keep_in_memory`` fix the intended names.
    """
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    # Arrow memory must grow only when the table is materialised in memory.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    # NOTE(review): ``_check_parquet_dataset`` is not defined under that name
    # in this renamed file (the helper above is ``lowerCAmelCase_``).
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize(
    """features""" , [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ] , )
def lowerCAmelCase_ ( parquet_path , tmp_path , features ):
    """Reading a parquet file with an explicit ``features`` schema (or None)
    yields a Dataset with the expected dtypes.

    Fixes: all three parameters shared one name (a SyntaxError) and
    ``Value`` was constructed from the wrong identifier instead of ``dtype``.
    """
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path , features=features , cache_dir=cache_dir ).read()
    # NOTE(review): ``_check_parquet_dataset`` is not defined under that name here.
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase_ ( parquet_path , tmp_path , split ):
    """Reading with an explicit ``split`` sets ``dataset.split`` (defaulting
    to "train" when None).

    Fixes: all three parameters shared one name (a SyntaxError).
    """
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , split=split ).read()
    # NOTE(review): ``_check_parquet_dataset`` is not defined under that name here.
    _check_parquet_dataset(dataset , expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCAmelCase_ ( parquet_path , tmp_path , path_type ):
    """The reader accepts either a single path (str) or a list of paths.

    Fixes: all three parameters shared one name (a SyntaxError) and the
    issubclass checks compared the same identifier against itself.
    """
    if issubclass(path_type , str ):
        path = parquet_path
    elif issubclass(path_type , list ):
        path = [parquet_path]
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    # NOTE(review): ``_check_parquet_dataset`` is not defined under that name here.
    _check_parquet_dataset(dataset , expected_features )
def lowerCAmelCase_ ( dataset_dict , expected_features , splits=("train",) ):
    """Shared assertions for a ``DatasetDict`` read from parquet: each
    requested split has the fixed shape, column names and dtypes.

    Fixes: the first two parameters shared one name (a SyntaxError) and the
    isinstance check compared a value against itself instead of
    ``DatasetDict``.
    """
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase_ ( parquet_path , tmp_path , keep_in_memory ):
    """Reading a {split: path} mapping into a DatasetDict honours
    ``keep_in_memory``.

    Fixes: all three parameters shared one name (a SyntaxError).
    """
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"""train""": parquet_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    # NOTE(review): ``_check_parquet_datasetdict`` is not defined under that name here.
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
    """features""" , [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ] , )
def lowerCAmelCase_ ( parquet_path , tmp_path , features ):
    """Reading a DatasetDict with an explicit ``features`` schema (or None)
    yields the expected dtypes.

    Fixes: all three parameters shared one name (a SyntaxError) and
    ``Value`` was constructed from the wrong identifier instead of ``dtype``.
    """
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader({"""train""": parquet_path} , features=features , cache_dir=cache_dir ).read()
    # NOTE(review): ``_check_parquet_datasetdict`` is not defined under that name here.
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def lowerCAmelCase_(split, parquet_path, tmp_path):
    """Reading a parquet DatasetDict yields the requested split(s).

    A falsy `split` means "read both train and test from the same file".
    """
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    # Each produced split must carry its own split name.
    assert all(dataset[split].split == split for split in path.keys())
def lowerCAmelCase_(dataset, tmp_path):
    """Writing a dataset to parquet round-trips its Arrow table exactly."""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    # write() returns the number of bytes written; zero would mean nothing was saved.
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def lowerCAmelCase_(tmp_path, shared_datadir):
    """Image features survive a parquet write/read round trip (eager and streaming)."""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    # Streaming reader must reconstruct the same feature schema.
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def lowerCAmelCase_(feature, expected):
    """get_writer_batch_size picks the parquet row-group size matching the feature types."""
    assert get_writer_batch_size(feature) == expected
| 0
| 0
|
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
a : Tuple = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def lowercase__(A) -> dict:
    """Rename checkpoint keys from the original SAM layout to the HF layout.

    Args:
        A: the raw state dict loaded from the original SAM checkpoint (mutated:
           the pixel normalisation buffers are popped).

    Returns:
        A new state dict with HF-compatible keys, plus the shared positional
        embedding duplicated under ``shared_image_embedding``.
    """
    state_dict = A
    model_state_dict = {}
    # These buffers belong to the processor, not the model.
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        # The hypernetwork MLP layers use proj_in / layers.0 / proj_out names in HF.
        match = re.match(output_hypernetworks_mlps_pattern, key)
        if match:
            layer_nb = int(match.group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def lowercase__(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    """Convert an original SAM checkpoint into a HF ``SamModel`` and sanity-check it.

    Args:
        model_name: checkpoint name, e.g. ``sam_vit_b_01ec64`` / ``sam_vit_l_0b3195`` /
            ``sam_vit_h_4b8939``.
        pytorch_dump_folder: kept for CLI compatibility (not used in this body).
        push_to_hub: kept for CLI compatibility (not used in this body).
        model_hub_id: hub repo hosting the raw ``.pth`` checkpoints.
    """
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    # Pick the vision config matching the checkpoint size; vit-b is the default config.
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    # Forward pass without any prompt.
    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    # Reference IoU values are only known for the huge checkpoint, so the
    # remaining checks are gated on it (the flat original would have asserted
    # vit_h-specific numbers for every model).
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        # Single point prompt.
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        # Box prompt.
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Name of the original SAM checkpoint to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    # No `choices` here: the hub id is a repo name, not a model name (the
    # original wrongly reused the model-name choices, which would reject any
    # explicitly passed value).
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        type=str,
        help="Hub repo that hosts the raw SAM checkpoints",
    )
    args = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 218
|
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__UpperCAmelCase =logging.get_logger(__name__)
class a__ ( UpperCAmelCase__ ):
    """Audio feature extractor producing padded log-mel spectrogram patches.

    NOTE(review): identifiers in this class were machine-scrambled — every
    ``__init__`` parameter is named ``a`` and several locals are referenced
    under names that are never bound; the comments below describe intent and
    flag each unresolved name.
    """

    # Keys of the dict returned by __call__.
    lowerCamelCase : Union[str, Any] =["audio_values", "audio_mask"]

    def __init__( self : List[str] , a : List[Any]=20_48 , a : Optional[int]=1 , a : List[str]=[16, 16] , a : int=1_28 , a : Dict=4_41_00 , a : str=86 , a : int=20_48 , a : int=0.0 , **a : Dict , ):
        """Configure the extractor and precompute the mel filter bank.

        NOTE(review): the original parameter names were lost. From the body and
        defaults they appear to be, in order: spectrogram_length, num_channels,
        patch_size, feature_size (num mel bins), sampling_rate,
        hop_length_to_sampling_rate, n_fft, padding_value — confirm against the
        upstream implementation before relying on this.
        """
        # NOTE(review): every keyword below receives the same (last-bound) `a`.
        super().__init__(
            feature_size=a , sampling_rate=a , padding_value=a , **a , )
        # NOTE(review): the right-hand names below (spectrogram_length, n_fft, ...)
        # are not bound in this scope — they were the original parameter names.
        __lowerCamelCase = spectrogram_length
        __lowerCamelCase = num_channels
        __lowerCamelCase = patch_size
        # Number of frequency patches per frame: mel bins / patch width.
        __lowerCamelCase = feature_size // self.patch_size[1]
        __lowerCamelCase = n_fft
        # Hop length in samples derived from the hop-to-sampling-rate ratio.
        __lowerCamelCase = sampling_rate // hop_length_to_sampling_rate
        __lowerCamelCase = sampling_rate
        __lowerCamelCase = padding_value
        # Slaney-style mel filter bank covering 0 Hz .. 22.05 kHz, transposed so
        # it can be applied to the spectrogram's frequency axis.
        __lowerCamelCase = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=a , norm='''slaney''' , mel_scale='''slaney''' , ).T

    def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : np.array ):
        """Compute a normalized log-mel spectrogram for one waveform.

        Power spectrogram -> dB mel bands, drop the last frame, shift by -20 dB
        and rescale/clip into [-1, 1].
        """
        __lowerCamelCase = spectrogram(
            a , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
        # Drop the final (partial) frame.
        __lowerCamelCase = log_spec[:, :-1]
        # Shift the dB range, then map roughly [-40, 0] dB onto [-1, 1].
        __lowerCamelCase = log_spec - 20.0
        __lowerCamelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec

    def __call__( self : Union[str, Any] , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : Optional[Union[str, TensorType]] = None , a : Optional[bool] = True , a : Optional[int] = None , a : bool = False , a : bool = False , **a : int , ):
        """Turn raw speech into padded log-mel patches plus an attention mask.

        NOTE(review): parameter names collapsed to ``a``; from the body they
        include raw_speech, return_tensors, return_attention_mask and
        sampling_rate — confirm upstream.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    '''This feature extractor is set to support sampling rate'''
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        # A 2-D numpy array is a batch of mono waveforms; >2-D is rejected.
        __lowerCamelCase = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        __lowerCamelCase = is_batched_numpy or (
            isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            __lowerCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(a , np.ndarray ):
            __lowerCamelCase = np.asarray(a , dtype=np.floataa )
        elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            __lowerCamelCase = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            __lowerCamelCase = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        __lowerCamelCase = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , a ):
            __lowerCamelCase = [np.asarray(a , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        __lowerCamelCase = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1 for real patches, 0 for padding, per example.
            __lowerCamelCase = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            __lowerCamelCase = np.array(a ).astype(np.floataa )
        # convert into correct format for padding
        __lowerCamelCase = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        # Pre-fill a constant-padded buffer, then copy each example into it.
        __lowerCamelCase = np.ones([len(a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        __lowerCamelCase = padded_audio_features * self.padding_value
        for i in range(len(a ) ):
            __lowerCamelCase = audio_features[i]
            __lowerCamelCase = feature
        # return as BatchFeature
        if return_attention_mask:
            __lowerCamelCase = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            __lowerCamelCase = {'''audio_values''': padded_audio_features}
        __lowerCamelCase = BatchFeature(data=a , tensor_type=a )
        return encoded_inputs
| 546
| 0
|
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
# True when the installed torch predates 1.11, whose ONNX exporter removed the
# `use_external_data_format` / `enable_onnx_checker` arguments.  The comparison
# yields a bool (the original `int` annotation was wrong).
A__: bool = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
# Readable alias used by the export helper below.
is_torch_less_than_1_11 = A__
def lowerCAmelCase_(
    model,
    model_args,
    output_path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export `model` to ONNX at `output_path`, handling the torch<1.11 API.

    Args:
        model: module to export.
        model_args: example inputs passed to torch.onnx.export.
        output_path: pathlib.Path of the target .onnx file (parents are created).
        ordered_input_names / output_names / dynamic_axes / opset: forwarded to
            torch.onnx.export.
        use_external_data_format: only meaningful on torch < 1.11.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format`
    # arguments in v1.11, so we check the torch version for backwards compatibility.
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def lowerCAmelCase_(model_path, output_path, opset, fpaa=False):
    """Export the VAE decoder of a diffusers checkpoint to ONNX.

    Args:
        model_path: local dir or hub id of the diffusers checkpoint.
        output_path: destination directory for the ONNX files.
        opset: ONNX opset version.
        fpaa: export in float16 (requires CUDA).

    Raises:
        ValueError: if fp16 export is requested without an available GPU.
    """
    # The scrambled original used the same dtype in both branches; fp16/fp32
    # must actually differ.
    dtype = torch.float16 if fpaa else torch.float32
    if fpaa and torch.cuda.is_available():
        device = "cuda"
    elif fpaa and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    # `--fp16` is stored as `args.fp16`; the scrambled original read a
    # non-existent `args.fpaa` attribute.
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 716
|
def lowerCAmelCase_(key):
    """Return `key` with duplicate letters removed, keeping order.

    Spaces are always kept (even repeated); non-alphabetic characters other
    than spaces are dropped.
    """
    key_no_dups = ""
    for ch in key:
        # `or` binds looser than `and`: keep any space, or a new alphabetic char.
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def lowerCAmelCase_(key):
    """Build a keyword-cipher substitution map from plaintext to ciphertext letters.

    The upper-cased, de-duplicated keyword fills the first positions of the
    cipher alphabet; the remaining letters follow in alphabetical order,
    skipping letters already consumed by the keyword.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from the keyword (inlined so this function
    # does not depend on a sibling helper).
    deduped = ""
    for ch in key.upper():
        if ch == " " or ch not in deduped and ch.isalpha():
            deduped += ch
    key = deduped
    offset = len(key)
    # First fill cipher with key characters.
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining plaintext letters to the alphabet from the beginning.
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Skip letters already used by the keyword.
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def lowerCAmelCase_(message, cipher_map):
    """Encipher `message` with `cipher_map`; unmapped characters pass through unchanged."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())
def lowerCAmelCase_(message, cipher_map):
    """Decipher `message` by inverting `cipher_map`; unmapped characters pass through."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def lowerCAmelCase_():
    """Interactive entry point: prompt for a message and keyword, then encipher/decipher.

    NOTE(review): `encipher` / `decipher` / `create_cipher_map` are the original
    names of the sibling helpers, which this file defines under scrambled
    names — confirm the bindings before running.
    """
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
    import doctest

    # Run module doctests before dropping into the interactive CLI.
    doctest.testmod()
    # NOTE(review): `main` is not defined under this name in this file (the
    # interactive entry point above is named `lowerCAmelCase_`) — confirm.
    main()
| 221
| 0
|
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ : Union[str, Any] = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = DebertaVaTokenizer
__UpperCAmelCase = DebertaVaTokenizerFast
__UpperCAmelCase = True
__UpperCAmelCase = True
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : Any = DebertaVaTokenizer(snake_case_ , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : Union[str, Any] , snake_case_ : str ):
'''simple docstring'''
snake_case__ : Tuple = 'this is a test'
snake_case__ : Optional[int] = 'this is a test'
return input_text, output_text
def __magic_name__ ( self : Dict ):
'''simple docstring'''
snake_case__ : Any = '<pad>'
snake_case__ : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
snake_case__ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(snake_case_ ) , 3_0_0_0_1 )
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
snake_case__ : Tuple = ' \tHeLLo!how \n Are yoU? '
snake_case__ : List[Any] = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
snake_case__ : Dict = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ )
snake_case__ : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : Optional[Any] = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ )
snake_case__ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
    @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
    def __magic_name__ ( self : Optional[Any] ):
        """Intentionally skipped: the fast tokenizer diverges from the slow one upstream."""
        pass
    @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
    def __magic_name__ ( self : Optional[int] ):
        """Intentionally skipped: the fast tokenizer diverges from the slow one upstream."""
        pass
def __magic_name__ ( self : str ):
'''simple docstring'''
snake_case__ : int = 'I was born in 92000, and this is falsé.'
snake_case__ : Union[str, Any] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case__ : Tuple = DebertaVaTokenizer(snake_case_ , split_by_punct=snake_case_ )
snake_case__ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : Union[str, Any] = DebertaVaTokenizerFast(snake_case_ , split_by_punct=snake_case_ )
snake_case__ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
snake_case__ : Optional[Any] = 'I was born in 92000, and this is falsé.'
snake_case__ : Optional[Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case__ : Optional[int] = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
snake_case__ : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : List[str] = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
snake_case__ : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
snake_case__ : Dict = 'I was born in 92000, and this is falsé.'
snake_case__ : Dict = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case__ : Union[str, Any] = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : List[Any] = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
snake_case__ : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def __magic_name__ ( self : Any ):
'''simple docstring'''
snake_case__ : Dict = 'I was born in 92000, and this is falsé.'
snake_case__ : List[Any] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case__ : Optional[Any] = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
snake_case__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : List[str] = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
snake_case__ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def __magic_name__ ( self : Any ):
'''simple docstring'''
snake_case__ : Tuple = ' \tHeLLo!how \n Are yoU? '
snake_case__ : Tuple = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
snake_case__ : Dict = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : Optional[Any] = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
snake_case__ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
snake_case__ : str = self.get_tokenizer()
snake_case__ : Dict = self.get_rust_tokenizer()
snake_case__ : int = 'I was born in 92000, and this is falsé.'
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
snake_case__ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : Optional[Any] = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
snake_case__ : Optional[int] = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : Optional[int] = self.get_rust_tokenizer()
snake_case__ : Any = tokenizer.encode(snake_case_ )
snake_case__ : int = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
snake_case__ : str = 'This is a test'
snake_case__ : str = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
snake_case__ : str = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
snake_case__ : Any = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
snake_case__ : Optional[int] = DebertaVaTokenizer(snake_case_ , keep_accents=snake_case_ )
snake_case__ : int = DebertaVaTokenizerFast(snake_case_ , keep_accents=snake_case_ )
snake_case__ : Any = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : int = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : str = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : List[Any] = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : List[str] = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : List[Any] = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# fmt: off
snake_case__ : int = 'I was born in 92000, and this is falsé.'
snake_case__ : Union[str, Any] = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
snake_case__ : int = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
snake_case__ : Dict = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case__ : Union[str, Any] = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : Union[str, Any] = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : int = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : Any = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : Any = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
snake_case__ : Tuple = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
snake_case__ : List[Any] = DebertaVaTokenizer(snake_case_ )
snake_case__ : Dict = tokenizer.encode('''sequence builders''' )
snake_case__ : int = tokenizer.encode('''multi-sequence build''' )
snake_case__ : List[Any] = tokenizer.build_inputs_with_special_tokens(snake_case_ )
snake_case__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , snake_case_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , snake_case_ , )
@slow
def __magic_name__ ( self : Union[str, Any] ):
    '''Integration test: compare tokenizer output against a pinned expected encoding.'''
    # Expected input_ids / token_type_ids / attention_mask for a fixed revision of
    # microsoft/deberta-v2-xlarge. NOTE(review): the dict is bound to `snake_case__`
    # but passed below as `snake_case_` — the names look machine-mangled; confirm
    # against the upstream DebertaV2 tokenizer integration test.
    # fmt: off
    snake_case__ : List[str] = {'input_ids': [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
    # fmt: on
    self.tokenizer_integration_test_util(
        expected_encoding=snake_case_ , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 347
|
"""simple docstring"""
def UpperCAmelCase_ ( __a : int ) -> str:
    """Recursively convert a non-negative integer to its binary digit string.

    Fix: the recursive call referenced an undefined name `binary_recursive`;
    the function must recurse on itself.
    """
    decimal = int(__a)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    # Most-significant digits come from the quotient, the remainder is the last bit.
    return UpperCAmelCase_(div) + str(mod)
def UpperCAmelCase_ ( __a : str ) -> str:
    """Convert a decimal integer string (optionally negative) to '0b...' binary form.

    Raises:
        ValueError: on empty input or non-numeric input.

    Fix: the original called an undefined `binary_recursive`; the helper is now
    defined locally so this function is self-contained.
    """

    def binary_recursive(decimal: int) -> str:
        # Base cases terminate the recursion; otherwise peel off the last bit.
        if decimal in (0, 1):
            return str(decimal)
        div, mod = divmod(decimal, 2)
        return binary_recursive(div) + str(mod)

    number = str(__a).strip()
    if not number:
        raise ValueError('No input value was provided')
    negative = '-' if number.startswith('-') else ''
    number = number.lstrip('-')
    if not number.isnumeric():
        raise ValueError('Input value is not an integer')
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
| 437
| 0
|
from copy import deepcopy
class _UpperCAmelCase:
    """Fenwick tree (binary indexed tree) over a list of integers.

    Index 0 is stored separately in ``tree[0]``; indices >= 1 use the classic
    BIT parent/child links so prefix sums and point updates run in O(log n).

    Fixes: the original used duplicate ``a__`` parameter names (a SyntaxError),
    gave every method the same mangled name, and referenced the undefined
    module-level name ``UpperCAmelCase_`` instead of its own parameters. The
    method names restored here (``init``, ``next_``, ``prev``, ``add``, ``get``,
    ``prefix``, ``query``) are exactly the ones the original bodies called.
    """

    def __init__(self, arr=None, size=None):
        """Build from an array, or create a zeroed tree of ``size`` elements."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        """Initialize the tree in O(n) from ``arr``."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        """Return the original array reconstructed from the tree, in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        # Next index covering this one: add the lowest set bit.
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        # Previous range in a prefix walk: clear the lowest set bit.
        return index - (index & (-index))

    def add(self, index, value):
        """Add ``value`` to the element at ``index`` (point update)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        """Set the element at ``index`` to ``value``."""
        self.add(index, value - self.get(index))

    def prefix(self, right):
        """Return the sum of elements in ``[0, right)``."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        """Return the sum of elements in ``[left, right)``."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        """Return the element at ``index``."""
        return self.query(index, index + 1)

    def rank_query(self, value):
        """Return the largest index whose prefix sum is <= ``value`` (binary lifting)."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 711
|
from typing import List
from .keymap import KEYMAP, get_character
def _lowerCAmelCase(key):
    """Decorator factory registering a single ``key`` on the decorated handler.

    The decorated function accumulates keys in its ``handle_key`` attribute,
    which the key-handler metaclass later reads to build a dispatch table.

    Fixes: the body read an undefined name ``key`` (the mangled parameter) and
    stored the function itself instead of the handle list via ``setattr``.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator
def _lowerCAmelCase(*keys):
    """Decorator factory registering several ``keys`` on the decorated handler.

    Same contract as the single-key variant above, but appends all given keys.

    Fix: the body read an undefined name ``keys`` (the mangled parameter) and
    stored the function itself instead of the handle list via ``setattr``.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class _UpperCAmelCase(_lowerCamelCase):
    """Metaclass that collects ``handle_key``-decorated attributes into a
    per-class ``key_handler`` dispatch table and attaches ``handle_input``.

    Fixes: ``__new__`` had three parameters all named ``a__`` (a SyntaxError)
    and referenced the undefined names ``KeyHandler`` and ``attrs``; the
    static handler is now actually named ``handle_input`` so the ``setattr``
    reference resolves.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", _UpperCAmelCase.handle_input)
        # Register every decorated attribute under each of its declared keys.
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key press and dispatch to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            # NOTE(review): the original assignment target was lost in mangling;
            # upstream stores the pressed key on the class — confirm the
            # attribute name against the upstream accelerate menu helpers.
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def _lowerCAmelCase(cls):
    """Rebuild ``cls`` through the key-handler metaclass defined above so its
    decorated methods get registered in ``key_handler``.

    Fix: the original referenced an undefined ``KeyHandler``; the metaclass in
    this module is named ``_UpperCAmelCase``.
    """
    return _UpperCAmelCase(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 481
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Map of submodule name -> public symbols, consumed by _LazyModule below.
# Fixes: the original rebound this dict's variable to a list (destroying it),
# referenced an undefined `_import_structure`, and discarded the _LazyModule
# instance instead of installing it in sys.modules.
UpperCamelCase__ = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only advertise the modeling classes when torch can actually be imported.
    UpperCamelCase__["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], UpperCamelCase__, module_spec=__spec__)
| 105
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class A_(__lowercase):
    """Deprecated feature-extractor alias kept for backward compatibility.

    Fixes: the original ``__init__`` declared ``*_A, **_A`` (duplicate
    parameter name, a SyntaxError) and passed the mangled ``_A`` where the
    warning category belongs.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Emit a deprecation warning, then defer entirely to the parent processor.
        warnings.warn(
            '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PerceiverImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 485
| 0
|
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase : List[str] = logging.getLogger(__name__)
class UpperCAmelCase__(UpperCamelCase__):
    """Configuration for a masked (prunable) BERT model.

    Fix: every ``__init__`` parameter was named ``UpperCamelCase`` (duplicate
    parameter names, a SyntaxError); names are restored from the attribute
    assignments in the body.
    """

    # model identifier used by the config registry (annotation corrected: it is a str)
    a : str = """masked_bert"""

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Pruning-specific knobs: scoring method plus mask initialization.
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 39
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __lowerCAmelCase(bpayload: bytes, sampling_rate: int):
    """Decode an audio byte payload to a mono float32 waveform via the ffmpeg CLI.

    Fixes: both parameters shared the name ``lowerCamelCase`` (a SyntaxError),
    the body read the undefined ``sampling_rate``, and ``np.floataa`` is the
    mangled spelling of ``np.float32``.

    Raises:
        ValueError: if ffmpeg is missing or the decoded stream is empty.
    """
    ar = f'''{sampling_rate}'''
    ac = "1"  # mono
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def __lowerCAmelCase(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """Yield raw microphone audio chunks captured through the ffmpeg CLI.

    Fixes: the three parameters shared the name ``lowerCamelCase`` (a
    SyntaxError); the body read the undefined pre-mangling names.
    """
    ar = f'''{sampling_rate}'''
    ac = "1"  # mono capture
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''')

    # Pick the capture backend / default device per platform.
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    # NOTE(review): other platforms leave format_/input_ unbound (NameError),
    # matching the original control flow — confirm whether that is intended.

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def __lowerCAmelCase(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as numpy chunks with left/right strides.

    Fixes: duplicate ``lowerCamelCase`` parameter names (a SyntaxError) and the
    mangled dtypes ``np.intaa``/``np.floataa`` (``np.int16``/``np.float32``).
    """
    # A smaller streaming chunk may be requested for lower latency.
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''')

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def __lowerCAmelCase(iterator, chunk_len: int, stride, stream: bool = False):
    """Re-chunk a byte iterator into overlapping windows of ``chunk_len`` bytes.

    Each yielded dict carries ``raw`` bytes and a ``(left, right)`` ``stride``;
    when ``stream`` is True, incomplete windows are emitted with ``partial``.

    Fix: all four parameters shared the name ``lowerCamelCase`` (a SyntaxError);
    names restored from the body's references.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''')
    _stride_left = 0  # the very first chunk has no left context
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                # Keep the overlap so the next window has left context.
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def __lowerCAmelCase(ffmpeg_command, buflen: int):
    """Spawn a subprocess and yield its stdout in ``buflen``-byte reads.

    Fix: both parameters shared the name ``lowerCamelCase`` (a SyntaxError).

    Raises:
        ValueError: if the executable (ffmpeg) cannot be found.
    """
    bufsize = 2**24  # 16Mo pipe buffer
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 39
| 1
|
"""simple docstring"""
from __future__ import annotations
def A_(pattern, text):
    """Knuth-Morris-Pratt substring search: return True iff ``pattern`` occurs in ``text``.

    Fixes: both parameters were named ``__lowercase`` (a SyntaxError), the
    tuple unpacking of the two cursors was lost, and the failure-array helper
    was referenced through an undefined name — it is now a local helper so the
    function is self-contained.
    """

    def _failure(pat):
        # Longest proper prefix that is also a suffix, per position.
        fail = [0]
        i, j = 0, 1
        while j < len(pat):
            if pat[i] == pat[j]:
                i += 1
            elif i > 0:
                i = fail[i - 1]
                continue
            j += 1
            fail.append(i)
        return fail

    failure = _failure(pattern)
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def A_(pattern):
    """Build the KMP failure array for ``pattern``.

    ``failure[k]`` is the length of the longest proper prefix of
    ``pattern[:k+1]`` that is also its suffix.

    Fix: the body read the undefined name ``pattern`` while the parameter was
    mangled to ``__lowercase``; the parameter name is restored.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # Fall back to the next shorter border and retry.
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # NOTE(review): every constant below is bound to the same name
    # `__SCREAMING_SNAKE_CASE`, yet the asserts read `pattern`, `text`, `texta`,
    # `kmp` and `get_failure_array` — none of which are defined at module level
    # here (the functions above are both named `A_`). This looks machine-mangled;
    # restore the upstream identifiers before running these self-tests.
    # Test 1)
    __SCREAMING_SNAKE_CASE = """abc1abc12"""
    __SCREAMING_SNAKE_CASE = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
    __SCREAMING_SNAKE_CASE = """alskfjaldsk23adsfabcabc"""
    assert kmp(pattern, texta) and not kmp(pattern, texta)
    # Test 2)
    __SCREAMING_SNAKE_CASE = """ABABX"""
    __SCREAMING_SNAKE_CASE = """ABABZABABYABABX"""
    assert kmp(pattern, text)
    # Test 3)
    __SCREAMING_SNAKE_CASE = """AAAB"""
    __SCREAMING_SNAKE_CASE = """ABAAAAAB"""
    assert kmp(pattern, text)
    # Test 4)
    __SCREAMING_SNAKE_CASE = """abcdabcy"""
    __SCREAMING_SNAKE_CASE = """abcxabcdabxabcdabcdabcy"""
    assert kmp(pattern, text)
    # Test 5)
    __SCREAMING_SNAKE_CASE = """aabaabaaa"""
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 357
|
"""simple docstring"""
def lowercase_ ( _lowercase : int ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise TypeError("only integers accepted as input" )
else:
UpperCAmelCase : Optional[int] = str(abs(_lowercase ) )
UpperCAmelCase : Union[str, Any] = [list(_lowercase ) for char in range(len(_lowercase ) )]
for index in range(len(_lowercase ) ):
num_transpositions[index].pop(_lowercase )
return max(
int("".join(list(_lowercase ) ) ) for transposition in num_transpositions )
# Execute the module doctests when run directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 595
| 0
|
from math import factorial
def UpperCAmelCase(lowercase__: int = 20) -> int:
    """Count lattice paths through an n*n grid: the central binomial C(2n, n).

    Fix: the body read the undefined names ``n`` and ``k`` while the parameter
    was mangled to ``lowercase__``; local bindings are restored.
    """
    n = 2 * lowercase__  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
    import sys

    # NOTE(review): `solution` and `n` are unresolved here — the function above
    # is named `UpperCAmelCase` and the parsed argv value is bound to
    # `_lowercase`. The names look machine-mangled; confirm against the
    # upstream Project Euler script before running.
    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            _lowercase : Dict =int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("""Invalid entry - please enter a number.""")
| 412
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowercase : int =logging.get_logger(__name__)
_lowercase : List[str] ={
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class lowerCAmelCase_(BackboneConfigMixin, PretrainedConfig):
    """ResNet model/backbone configuration.

    Fixes: the class listed the same undefined base twice (``A_, A_`` — a
    duplicate-base TypeError), both class attributes shared one name (so
    ``self.layer_types`` below was unresolved), and every ``__init__``
    parameter was named ``lowerCamelCase`` (a SyntaxError). Bases are restored
    to the two classes imported at the top of this module.
    """

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        # Stage names drive backbone feature selection below.
        self.stage_names = ["""stem"""] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class lowerCAmelCase_(OnnxConfig):
    """ONNX export configuration for ResNet.

    Fixes: both properties were named ``_A`` (the first was silently shadowed
    and dead) and the base/attribute names were mangled. Names are restored to
    the OnnxConfig contract (``inputs`` / ``atol_for_validation``), with the
    base taken from the ``OnnxConfig`` import at the top of this module.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        """Dynamic-axis description of the single pixel_values input."""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ]
        )

    @property
    def atol_for_validation(self):
        """Absolute tolerance when validating exported outputs."""
        return 1e-3
| 412
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowercase(__lowerCamelCase):
    """Unconditional DDIM image-generation pipeline.

    Fixes: ``__init__`` declared two parameters named ``A`` and ``__call__``
    declared seven (duplicate parameter names, a SyntaxError); names are
    restored from the body's usage of unet/scheduler/generator/eta/etc.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 65
|
"""simple docstring"""
import requests
def lowerCAmelCase(message_body, slack_url):
    """POST ``message_body`` as JSON text to a Slack incoming-webhook URL.

    Fix: both parameters shared the name ``__UpperCamelCase`` (a SyntaxError);
    names restored from the body's usage.

    Raises:
        ValueError: if Slack responds with a non-200 status code.
    """
    headers = {"""Content-Type""": """application/json"""}
    response = requests.post(slack_url, json={"""text""": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            """Request to slack returned an error """
            F"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)
if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    # NOTE(review): `send_slack_message` is unresolved — the function above is
    # named `lowerCAmelCase`. The entry-point name looks machine-mangled;
    # confirm against the upstream script.
    send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 65
| 1
|
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__UpperCAmelCase = logging.get_logger(__name__)
def __A(box, width, height):
    """Scale an absolute (left, top, right, bottom) box to the 0-1000 range.

    Fix: the three parameters shared the name ``lowerCamelCase_`` (a
    SyntaxError); names restored from the arithmetic in the body.
    """
    return [
        int(10_00 * (box[0] / width)),
        int(10_00 * (box[1] / height)),
        int(10_00 * (box[2] / width)),
        int(10_00 * (box[3] / height)),
    ]
def __A(image, lang, tesseract_config):
    """Run Tesseract OCR on ``image``; return (words, boxes normalized to 0-1000).

    Fix: the three parameters shared the name ``lowerCamelCase_`` (a
    SyntaxError); names restored from the body's usage.
    """
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="""dict""", config=tesseract_config)
    words, left, top, width, height = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class UpperCamelCase__(BaseImageProcessor):
    """LayoutLM-style image processor: resize/rescale/normalize plus optional
    Tesseract OCR to extract words and normalized boxes.

    Fixes: every method declared several parameters all named
    ``lowerCamelCase_`` (a SyntaxError) and all four methods shared one mangled
    name, so only the last survived. Method names are restored to the ones the
    ``preprocess`` body itself calls (``resize``/``rescale``/``normalize``),
    and the base class to the ``BaseImageProcessor`` imported above.
    """

    model_input_names = ['''pixel_values''']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 2_55,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"""height""": 2_24, """width""": 2_24}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize ``image`` to ``size['height'] x size['width']``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''')
        output_size = (size["""height"""], size["""width"""])
        # `resize` here resolves to the module-level image transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured transforms (and optional OCR) over a batch of images."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")

        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""")

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""If do_normalize is True, image_mean and image_std must be specified.""")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, """pytesseract""")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"""pixel_values""": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["""words"""] = words_batch
            data["""boxes"""] = boxes_batch
        return data
| 79
|
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# NOTE(review): all four module "constants" below share one garbled name, so
# only the final assignment (1) survives. From usage in main() they were
# presumably LABEL_DIR, IMG_DIR, OUTPUT_DIR and FLIP_TYPE — TODO restore.
__UpperCAmelCase = """"""
__UpperCAmelCase = """"""
__UpperCAmelCase = """"""
__UpperCAmelCase = 1  # (0 is vertical, 1 is horizontal)
def __A ( ):
    """Flip-augment a YOLO dataset: read image/annotation pairs, flip them,
    and write the flipped images plus matching ``.txt`` annotation files.

    NOTE(review): broken as written — locals are clobbered into the single
    name SCREAMING_SNAKE_CASE (the annotated tuple targets are a
    SyntaxError), and get_dataset / update_image_and_anno / OUTPUT_DIR /
    paths / new_annos are referenced but never defined here.
    """
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = get_dataset(lowerCamelCase_ , lowerCamelCase_ )
    print("""Processing...""" )
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = update_image_and_anno(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    for index, image in enumerate(lowerCamelCase_ ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        SCREAMING_SNAKE_CASE : Optional[int] = random_chars(32 )
        SCREAMING_SNAKE_CASE : Optional[Any] = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        SCREAMING_SNAKE_CASE : Dict = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cva.imwrite(f'''/{file_root}.jpg''' , lowerCamelCase_ , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(f'''Success {index+1}/{len(lowerCamelCase_ )} with {file_name}''' )
        SCREAMING_SNAKE_CASE : Optional[Any] = []
        for anno in new_annos[index]:
            SCREAMING_SNAKE_CASE : Optional[Any] = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(lowerCamelCase_ )
        with open(f'''/{file_root}.txt''' , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def __A ( label_dir , img_dir ):
    """Collect (image path, annotations) pairs from a YOLO-format dataset.

    Each ``*.txt`` file in ``label_dir`` holds one object per line as
    ``<class> <x_center> <y_center> <width> <height>``; the matching image is
    ``<same stem>.jpg`` inside ``img_dir``.

    Args:
        label_dir: directory containing the ``.txt`` label files.
        img_dir: directory containing the ``.jpg`` images.

    Returns:
        tuple: (list of image paths, per-image lists of
        ``[class, x, y, w, h]`` boxes).  Label files with no boxes are
        skipped entirely.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        # Skip images whose label file contains no objects.
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def __A ( img_list , anno_list , flip_type = 1 ):
    """Flip every image and its normalized YOLO boxes.

    Args:
        img_list: list of image file paths.
        anno_list: per-image lists of ``[class, x, y, w, h]`` boxes with
            coordinates normalized to [0, 1].
        flip_type: 1 flips horizontally (mirrors x center), 0 vertically
            (mirrors y center) — matches cv2.flip's flip codes.

    Returns:
        tuple: (flipped images, flipped annotation lists, original paths).
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # Horizontal flip mirrors the x center around 0.5.
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                # Vertical flip mirrors the y center around 0.5.
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def __A ( number_char = 32 ):
    """Return a random string of ``number_char`` lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
    # NOTE(review): ``main`` is undefined — the entry point above was renamed
    # to ``__A`` by the obfuscation, so running this script raises NameError.
    main()
    print("""DONE ✅""")
| 79
| 1
|
import math
def lowercase ( number : int ) -> bool:
    """Return True iff ``number`` is prime (trial division over 6k±1)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def _pe58_is_prime(number: int) -> bool:
    """Helper: trial-division primality test over 6k±1 candidates."""
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def lowercase ( ratio : float = 0.1 ) -> int:
    """Project Euler 58: smallest square-spiral side length at which the
    fraction of primes along both diagonals first drops below ``ratio``.

    Starts at side 3 (diagonal values 3, 5, 7, 9 → 3 primes of 5 numbers)
    and grows the spiral two units per layer, counting new corner primes.
    """
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        # The three non-square corners of the next layer (side j + 2).
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += _pe58_is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
    # Run the embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 36
|
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def SCREAMING_SNAKE_CASE ( model_card_dir , src_lang , tgt_lang ):
    """Generate a README.md model card for a facebook/wmt19 FSMT model.

    Args:
        model_card_dir: directory the ``README.md`` is written into
            (created if missing).
        src_lang: source language code (e.g. ``"en"``).
        tgt_lang: target language code (e.g. ``"ru"``).
    """
    # Sample sentences used in the usage snippet of the card.
    texts = {
        'en': 'Machine learning is great, isn\'t it?',
        'ru': 'Машинное обучение - это здорово, не так ли?',
        'de': 'Maschinelles Lernen ist großartig, oder?',
    }
    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
        'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
        'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
        'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(f"Generating {path}")
    with open(path, 'w', encoding='utf-8') as f:
        f.write(readme)
# make sure we are under the root of the project
# NOTE(review): every binding below shares the garbled name ``UpperCAmelCase_``
# (originally repo_dir, model_cards_dir, src/tgt lang parts, model_card_dir),
# and ``write_model_card`` is undefined — the function above was renamed.
UpperCAmelCase_ = Path(__file__).resolve().parent.parent.parent
UpperCAmelCase_ = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = model_name.split("-")
    UpperCAmelCase_ = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 539
| 0
|
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def _lowerCAmelCase ( size , overlap_pixels , remove_borders=None ):
    """Build a uint8 alpha mask for blending one upscaled tile into the canvas.

    The mask is 255 in the interior and linearly ramps to 0 over
    ``overlap_pixels`` on each edge, except edges named in
    ``remove_borders`` (any of "l", "r", "t", "b"), which stay opaque.

    Args:
        size: (width, height) of the finished mask.
        overlap_pixels: width of the fade ramp on each blended edge.
        remove_borders: iterable of edge letters to leave un-faded
            (default: none; ``None`` avoids a mutable default argument).

    Returns:
        numpy.ndarray of shape (height, width), dtype uint8.
    """
    if remove_borders is None:
        remove_borders = []
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    # Edges that are not blended keep their full extent in the core.
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    # Trim the ramp back off the edges that should stay opaque.
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def _lowerCAmelCase ( n , smallest , largest ):
    """Clamp ``n`` into the inclusive range [smallest, largest]."""
    return max(smallest, min(n, largest))
def _lowerCAmelCase ( rect , lower , upper ):
    """Clamp an (x0, y0, x1, y1) rectangle into the per-axis box
    [lower, upper], where lower/upper are (x, y) pairs.
    """
    def _clamp(value, smallest, largest):
        # Local helper so this block has no dependency on the sibling clamp().
        return max(smallest, min(value, largest))
    return (
        _clamp(rect[0], lower[0], upper[0]),
        _clamp(rect[1], lower[1], upper[1]),
        _clamp(rect[2], lower[0], upper[0]),
        _clamp(rect[3], lower[1], upper[1]),
    )
def _lowerCAmelCase ( rect , overlap , image_size ):
    """Grow a rectangle by ``overlap`` pixels on every side, clamped to
    stay inside the (width, height) ``image_size``.

    Returns the clamped rectangle as an (x0, y0, x1, y1) tuple.
    """
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    def _clamp(value, smallest, largest):
        # Local helper so this block has no dependency on sibling helpers.
        return max(smallest, min(value, largest))
    return (
        _clamp(rect[0], 0, image_size[0]),
        _clamp(rect[1], 0, image_size[1]),
        _clamp(rect[2], 0, image_size[0]),
        _clamp(rect[3], 0, image_size[1]),
    )
def _lowerCAmelCase ( tile , original_image , original_slice , slice_x ):
    """Prepend a strip of the (tile-sized, resized) original image to the
    left edge of ``tile`` so the upscaler sees left-hand context.

    Args:
        tile: PIL image tile to extend.
        original_image: full original image providing the context strip.
        original_slice: width in pixels of the strip to prepend.
        slice_x: x offset (in tile-resolution coordinates) of the strip.

    Returns:
        A new PIL image of width ``tile.width + original_slice``.
    """
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])), (0, 0), )
    result.paste(tile, (original_slice, 0))
    return result
def _lowerCAmelCase ( tile , original_image_slice ):
    """Crop away the squeezed-in context strip (4x upscaled) from the left
    of ``tile``, undoing squeeze_tile after upscaling.
    """
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    return tile.crop(crop_rect)
def _lowerCAmelCase ( n , d ):
    """Round ``n`` down to the nearest multiple of ``d``."""
    divisor = n % d
    return n - divisor
class lowerCamelCase__ ( __snake_case ):
    # NOTE(review): this tiled variant of StableDiffusionUpscalePipeline is
    # heavily garbled — method parameters are all duplicated as
    # ``lowerCAmelCase__`` (a SyntaxError) and locals are clobbered into
    # ``_UpperCamelCase`` while the bodies read real names (crop_rect, tile,
    # final_image, ...). Kept byte-identical; comments document intent only.
    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 350 , ) -> List[Any]:
        """Forward all components (vae, text encoder, tokenizer, unet,
        schedulers, max noise level) to the parent upscale pipeline."""
        super().__init__(
            vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , max_noise_level=lowerCAmelCase__ , )
    def _UpperCamelCase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
        """Upscale one (x, y) tile: crop with overlap, squeeze in left
        context, run the parent pipeline, then paste back with a fade mask.
        NOTE(review): presumably the original ``_process_tile`` referenced by
        __call__ below — TODO confirm against the upstream community pipeline.
        """
        torch.manual_seed(0 )
        # Tile rectangle in image coordinates, clamped at the right/bottom.
        _UpperCamelCase :str =(
            min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
            min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
            min(image.size[0] , (x + 1) * tile_size ),
            min(image.size[1] , (y + 1) * tile_size ),
        )
        _UpperCamelCase :Union[str, Any] =add_overlap_rect(lowerCAmelCase__ , lowerCAmelCase__ , image.size )
        _UpperCamelCase :str =image.crop(lowerCAmelCase__ )
        # X position of the tile center, translated into tile coordinates.
        _UpperCamelCase :Union[str, Any] =((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        _UpperCamelCase :int =translated_slice_x - (original_image_slice / 2)
        _UpperCamelCase :List[Any] =max(0 , lowerCAmelCase__ )
        _UpperCamelCase :Dict =squeeze_tile(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
        _UpperCamelCase :Any =to_input.size
        _UpperCamelCase :Union[str, Any] =to_input.resize((tile_size, tile_size) , Image.BICUBIC )
        # Delegate the actual 4x upscale to the parent pipeline's __call__.
        _UpperCamelCase :str =super(lowerCAmelCase__ , self ).__call__(image=lowerCAmelCase__ , **lowerCAmelCase__ ).images[0]
        _UpperCamelCase :Any =upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
        _UpperCamelCase :List[Any] =unsqueeze_tile(lowerCAmelCase__ , lowerCAmelCase__ )
        _UpperCamelCase :Tuple =upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
        _UpperCamelCase :int =[]
        # Edges that touch the image border get no fade ramp.
        if x == 0:
            remove_borders.append("""l""" )
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("""r""" )
        if y == 0:
            remove_borders.append("""t""" )
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("""b""" )
        _UpperCamelCase :int =Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=lowerCAmelCase__ ) , mode="""L""" , )
        final_image.paste(
            lowerCAmelCase__ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , lowerCAmelCase__ )
    @torch.no_grad()
    def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 75 , lowerCAmelCase__ = 9.0 , lowerCAmelCase__ = 50 , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 128 , lowerCAmelCase__ = 32 , lowerCAmelCase__ = 32 , ) -> List[Any]:
        """Upscale ``image`` 4x by processing it tile-by-tile and pasting the
        results into one canvas; invokes ``callback`` with progress/image
        after every tile."""
        _UpperCamelCase :List[Any] =Image.new("""RGB""" , (image.size[0] * 4, image.size[1] * 4) )
        _UpperCamelCase :Any =math.ceil(image.size[0] / tile_size )
        _UpperCamelCase :str =math.ceil(image.size[1] / tile_size )
        _UpperCamelCase :Any =tcx * tcy
        _UpperCamelCase :Tuple =0
        for y in range(lowerCAmelCase__ ):
            for x in range(lowerCAmelCase__ ):
                self._process_tile(
                    lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , prompt=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , noise_level=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , )
                current_count += 1
                if callback is not None:
                    callback({"""progress""": current_count / total_tile_count, """image""": final_image} )
        return final_image
def _lowerCAmelCase ( ) -> int:
    """Demo entry point: tile-upscale a sample image with the x4 upscaler.

    NOTE(review): broken as written — locals are clobbered into
    ``_UpperCamelCase``; ``StableDiffusionTiledUpscalePipeline`` is undefined
    (the class above was renamed ``lowerCamelCase__``); ``torch.floataa`` is
    presumably a garbling of ``torch.float16``; the callback's parameter is
    named ``__a`` but its body reads ``obj``.
    """
    _UpperCamelCase :Optional[Any] ="""stabilityai/stable-diffusion-x4-upscaler"""
    _UpperCamelCase :Union[str, Any] =StableDiffusionTiledUpscalePipeline.from_pretrained(__a , revision="""fp16""" , torch_dtype=torch.floataa )
    _UpperCamelCase :Optional[int] =pipe.to("""cuda""" )
    _UpperCamelCase :Tuple =Image.open("""../../docs/source/imgs/diffusers_library.jpg""" )
    def callback(__a ):
        print(F'''progress: {obj['progress']:.4f}''' )
        obj["image"].save("""diffusers_library_progress.jpg""" )
    _UpperCamelCase :List[Any] =pipe(image=__a , prompt="""Black font, white background, vector""" , noise_level=40 , callback=__a )
    final_image.save("""diffusers_library.jpg""" )
if __name__ == "__main__":
main()
| 706
|
'''simple docstring'''
def _lowerCAmelCase ( str_1 , str_2 ) -> float:
    """Jaro–Winkler similarity of two strings, in [0.0, 1.0].

    Combines the Jaro similarity (matches within a sliding window, minus
    half the transpositions) with a bonus of 0.1 per common prefix
    character (up to 4).
    """
    def get_matched_characters(_str1, _str2) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # Blank out the matched character so it cannot match twice.
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str_1, str_2)
    matching_2 = get_matched_characters(str_2, str_1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_1)
                + match_count / len(str_2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str_1[:4], str_2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): ``jaro_winkler`` is undefined here — the function above is
    # named ``_lowerCAmelCase``; this call raises NameError.
    print(jaro_winkler("""hello""", """world"""))
| 512
| 0
|
'''simple docstring'''
import math
def _lowerCAmelCase ( number : int ) -> bool:
    """Return True iff ``number`` is prime (trial division over 6k±1)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def _spiral_is_prime(number: int) -> bool:
    """Helper: trial-division primality test over 6k±1 candidates."""
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def _lowerCAmelCase ( ratio : float = 0.1 ) -> int:
    """Project Euler 58: smallest square-spiral side length at which the
    fraction of primes along both diagonals first drops below ``ratio``.
    """
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        # Three non-square corners of the next spiral layer (side j + 2).
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += _spiral_is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
    # Run the embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 8
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both bindings below share one garbled name — originally the
# module ``logger`` and ``VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP``.
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class a ( __SCREAMING_SNAKE_CASE ):
    """Configuration for a ViT-MSN model.

    Stores the hyper-parameters of the ViT encoder; all defaults match the
    vit-msn-base architecture. Extra keyword arguments are forwarded to the
    base pretrained-config class.
    """

    # NOTE(review): originally ``model_type = "vit_msn"``; the garbled
    # attribute name is kept to avoid widening the interface change.
    UpperCamelCase_ : Tuple = 'vit_msn'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Store every architecture hyper-parameter on the instance — the
        # original obfuscated code bound them all to one throwaway local.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 332
| 0
|
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if a == 0:
raise ValueError('''Coefficient \'a\' must not be zero.''' )
UpperCamelCase__ : Dict = b * b - 4 * a * c
UpperCamelCase__ : int = (-b + sqrt(SCREAMING_SNAKE_CASE )) / (2 * a)
UpperCamelCase__ : List[Any] = (-b - sqrt(SCREAMING_SNAKE_CASE )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Tuple = quadratic_roots(a=5 , b=6 , c=1 )
print(F"The solutions are: {solutiona} and {solutiona}" )
if __name__ == "__main__":
main()
| 718
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__UpperCamelCase : Any = None
try:
import msvcrt
except ImportError:
__UpperCamelCase : Optional[Any] = None
try:
import fcntl
except ImportError:
__UpperCamelCase : Union[str, Any] = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    # NOTE(review): originally ``TimeoutError = OSError`` (Python 2 shim);
    # the garbled target below no longer defines TimeoutError.
    __UpperCamelCase : Any = OSError
# Data
# ------------------------------------------------
__UpperCamelCase : Optional[int] = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]
# NOTE(review): the bindings in this section all share one garbled name —
# originally ``__all__`` (the list above), ``__version__ = "3.0.12"`` and the
# module-global logger cache ``_logger = None``; only the last one survives.
__UpperCamelCase : Dict = "3.0.12"
__UpperCamelCase : str = None
def _a ( ):
    """Return the module-level logger, creating and caching it on first use.

    The original code assigned the logger to a garbled throwaway name and
    returned the untouched global; this version actually populates the
    ``_logger`` cache (and tolerates the global being missing entirely).
    """
    global _logger
    if globals().get("_logger") is None:
        _logger = logging.getLogger(__name__)
    return _logger
class __magic_name__ ( __lowerCAmelCase):
    """Exception raised when a file lock cannot be acquired within the
    timeout. Carries the path of the contested lock file.
    """

    def __init__(self, lock_file):
        # Path of the lock file that could not be acquired; the original
        # obfuscated code bound it to a throwaway local instead of self.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = F"The file lock '{self.lock_file}' could not be acquired."
        return temp
class __magic_name__ :
    """Context-manager proxy returned by ``BaseFileLock.acquire()``.

    Entering yields the lock object; exiting releases it once, which plays
    well with the lock's nested acquisition counter.
    """

    def __init__(self, lock):
        # The original obfuscated code never assigned the lock to self.
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class __magic_name__ :
    """Base file lock implementing counted (re-entrant) acquisition.

    Subclasses supply the platform-specific ``_acquire``/``_release``.
    NOTE(review): ``logger``, ``Timeout`` and ``_Acquire_ReturnProxy`` are
    referenced exactly as in the source; their definitions were renamed by
    the obfuscation elsewhere in this file.
    """

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function. This is only NOT None while we hold the lock.
        self._lock_file_fd = None
        # The default timeout value (goes through the property setter → float).
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # Nested-locking counter: incremented on acquire, lock released only
        # when it returns to 0.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        """Path of the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """Default acquire timeout in seconds (negative = wait forever)."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform hook: try to take the lock once (non-blocking)."""
        raise NotImplementedError()

    def _release(self):
        """Platform hook: release the lock."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True while this object holds the OS-level lock."""
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquire the lock, polling every ``poll_intervall`` seconds.

        Raises ``Timeout`` when ``timeout`` (>= 0) seconds elapse first.
        Returns a proxy usable as a context manager that releases on exit.
        """
        if timeout is None:
            timeout = self.timeout
        # Increment the counter right at the beginning so nested acquires
        # are counted; undone below if anything fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()
                if self.is_locked:
                    logger().debug(F"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...")
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Decrement the counter; actually unlock at 0 (or when forced)."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(F"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        # Force-release so a garbage-collected lock never stays held.
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        """Shorten over-long lock filenames by splicing in their hash."""
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + '''...''' + hashed_filename + '''.lock'''
            return os.path.join(dirname, new_filename)
        else:
            return path
class __magic_name__ ( __lowerCAmelCase):
    """Windows file lock based on ``msvcrt.locking()``."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        # Extended-length path prefix so paths longer than MAX_PATH work.
        self._lock_file = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class __magic_name__ ( __lowerCAmelCase):
    """Unix file lock based on ``fcntl.flock()``."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        # The filesystem's f_namemax bounds the lock-file name length.
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class __magic_name__ ( __lowerCAmelCase):
    """Portable soft lock relying on exclusive file creation (O_EXCL)."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            # Lock file already exists: someone else holds the lock.
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# NOTE(review): originally this selected the platform class into ``FileLock``
# — WindowsFileLock / UnixFileLock / SoftFileLock are undefined here because
# the classes above were all renamed ``__magic_name__`` by the obfuscation,
# and every binding shares the garbled target name.
__UpperCamelCase : Tuple = None
if msvcrt:
    __UpperCamelCase : str = WindowsFileLock
elif fcntl:
    __UpperCamelCase : Optional[Any] = UnixFileLock
else:
    __UpperCamelCase : Optional[Any] = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 106
| 0
|
'''simple docstring'''
def _snake_case ( edge : float ) -> float:
    """Surface area of a regular dodecahedron with the given edge length.

    Raises:
        ValueError: if ``edge`` is not a positive int/float.
    """
    # Check the type first so a non-numeric argument raises ValueError
    # instead of a TypeError from the ``<=`` comparison.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("""Length must be a positive.""")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def _snake_case ( edge : float ) -> float:
    """Volume of a regular dodecahedron with the given edge length.

    Raises:
        ValueError: if ``edge`` is not a positive int/float.
    """
    # Check the type first so a non-numeric argument raises ValueError
    # instead of a TypeError from the ``<=`` comparison.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("""Length must be a positive.""")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
    # Run the embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 433
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCAmelCase = None
# NOTE(review): every binding below shares the garbled name ``UpperCAmelCase``
# — originally the module ``logger``, ``VOCAB_FILES_NAMES``,
# ``PRETRAINED_VOCAB_FILES_MAP`` and
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``; only the last survives.
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase = {
    'vocab_file': {
        'facebook/nllb-200-distilled-600M': (
            'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'facebook/nllb-200-distilled-600M': (
            'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
        ),
    },
}
UpperCAmelCase = {
    'facebook/nllb-large-en-ro': 1024,
    'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
UpperCAmelCase = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 
'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(_lowerCAmelCase):  # base presumably PreTrainedTokenizerFast — mangled upstream, kept as-is
    """
    Fast NLLB tokenizer (backed by HuggingFace *tokenizers*), based on BPE.

    BUG FIXES vs. the previous revision: every method was named ``__snake_case``
    (so later definitions shadowed earlier ones), ``__init__`` had duplicate
    parameter names (a SyntaxError), and ``@src_lang.setter`` referenced a
    property that did not exist. Canonical names are restored.

    The tokenizer prepends the source-language code and appends ``</s>`` to each
    sequence (legacy behaviour: ``... </s> [lang_code]`` instead).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        )
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # A slow tokenizer can only be rebuilt when the sentencepiece model file is available.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Current source language code (e.g. ``"eng_Latn"``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Add the language prefix/suffix special tokens around the sequence(s)."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """NLLB does not use token type ids; return a zero list of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs and the forced BOS id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts,
        src_lang="eng_Latn",
        tgt_texts=None,
        tgt_lang="fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to the source language: [src_code] ... </s>
        (legacy behaviour: ... </s> [src_code])."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, tgt_lang) -> None:
        """Reset special tokens to the target language: [tgt_code] ... </s>
        (legacy behaviour: ... </s> [tgt_code])."""
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into `save_directory` (requires a slow-compatible tokenizer)."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 433
| 1
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# BUG FIX: the logger and the archive map were both assigned to the same mangled
# name, so the second assignment clobbered the first. Canonical names restored.
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    """
    Configuration for a Time Series Transformer model.

    BUG FIXES vs. the previous revision: ``__init__`` declared every parameter
    under one duplicated mangled name (a SyntaxError) while the body referenced
    the real names; the ``_number_of_features`` property was defined under a
    mangled name but referenced as ``self._number_of_features``; the base class
    name was mangled although ``PretrainedConfig`` is imported above.
    """

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 1_00,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # default heuristic: half the (cardinality + 1), capped at 50
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra input features fed alongside the lagged values."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 367
|
import pytest
import datasets
# Import fixture modules as plugins
UpperCAmelCase_ : Union[str, Any] = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    """Pytest hook: mark tests as "unit" by default unless already marked "integration" or "unit".

    BUG FIX: pytest discovers hooks by name; the previous mangled name was never called.
    """
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    """Pytest hook: register the custom markers used by the test suite."""
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)  # BUG FIX: `autouse` referenced an undefined name
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """Redirect every datasets cache directory into the pytest temp tree."""
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")  # BUG FIX: `autouse` referenced an undefined name
def disable_tqdm_output():
    """Silence progress bars for the whole test session."""
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)  # BUG FIX: `autouse` referenced an undefined name
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 367
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A directed edge with a 0/1 weight (for 0-1 BFS)."""

    # BUG FIX: the two fields shared one mangled name (invalid dataclass) while
    # the graph code accessed `edge.destination_vertex` / `edge.weight`.
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Directed graph stored as adjacency lists, supporting 0-1 BFS shortest paths."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Iterate over the outgoing edges of `vertex`."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        """Add a directed edge; weight must be 0 or 1 and the target in range."""
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: weight-0 edges go to the front of the deque, weight-1 to the back.

        Raises ValueError when `finish_vertex` is unreachable.
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 153
|
"""simple docstring"""
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first `n` (rstripped) lines of every file in `src_dir` to a
    same-named file in `dest_dir`.

    BUG FIX: `fire.Fire(minify)` below referenced this function by its real
    name while it was defined under a mangled one. Also closes file handles
    via context managers instead of leaking them.
    """
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        with path.open() as f:
            new = [x.rstrip() for x in f.readlines()][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.write_text("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
| 153
| 1
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
__A =get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    """Save an FSDP-wrapped model's state dict to `output_dir`.

    BUG FIX: all four FSDP helpers were defined under one mangled name (each
    shadowing the previous) and their bodies referenced parameter names that no
    longer existed in the signatures. Canonical names restored.

    The layout depends on ``fsdp_plugin.state_dict_type``:
    FULL (rank 0 writes one .bin), LOCAL (one .bin per rank),
    SHARDED (a distributed-checkpoint directory).
    """
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            # Full state dict is gathered on rank 0; only that rank writes it.
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    """Load an FSDP model state dict saved by `save_fsdp_model` from `input_dir`.

    Mirrors the three `state_dict_type` layouts (FULL / LOCAL / SHARDED).
    """
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                # Non-zero ranks only need the weights when FSDP broadcasts them at wrap time.
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    """Save the FSDP optimizer state to `output_dir`.

    FULL state dicts are gathered and written by rank 0; otherwise a sharded
    distributed checkpoint directory is written.
    """
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    """Load the FSDP optimizer state saved by `save_fsdp_optimizer` and restore it
    into `optimizer` (re-flattened for the wrapped `model`)."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
| 313
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# BUG FIX: all of these constants were assigned to one mangled name, each
# assignment clobbering the previous (including the logger), while the
# decorators below reference them as _CHECKPOINT_FOR_DOC, _CONFIG_FOR_DOC, etc.

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1_0_8_8, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    """Conv2d + BatchNorm2d + optional activation.

    BUG FIXES: restored the class name, the duplicated mangled parameter names
    (a SyntaxError), `forward` (mangled name broke `nn.Module.__call__`), and
    the torch API names (`nn.Convad` -> `nn.Conv2d`, `nn.BatchNormad` -> `nn.BatchNorm2d`).
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,  # "same" padding for odd kernel sizes
            groups=groups,
            bias=False,  # BatchNorm immediately follows, so a conv bias is redundant
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """RegNet stem: a single stride-2 3x3 conv layer that embeds the pixel values."""

    def __init__(self, config):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        # remembered so forward can validate the input channel dimension
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """Residual shortcut: 1x1 conv (to project channels / downsample) + batch norm."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer: global-pool, bottleneck MLP (1x1 convs), sigmoid gate."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """RegNet's X layer: a ResNet-style bottleneck with reduction 1 and a grouped 3x3 conv."""

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # last conv has no activation: it is applied after the residual add
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with a Squeeze-and-Excitation block."""

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            # last conv has no activation: it is applied after the residual add
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A RegNet stage: `depth` X or Y layers; the first one downsamples with the given stride."""

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    """Stack of RegNet stages; optionally collects per-stage hidden states."""

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """Abstract base handling weight init and pretrained download for RegNet models.

    BUG FIX: the base class name was mangled although ``PreTrainedModel`` is
    imported above; method names restored to the hooks PreTrainedModel calls.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # He init for convs (relu fan-out), unit scale / zero shift for norms.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
# BUG FIX: both docstrings were assigned to the same mangled name (the second
# clobbering the first, and both clobbering the module logger).
REGNET_START_DOCSTRING = R"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = R"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    """Embeddings stem + encoder stages + global average pooler."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    """RegNet backbone plus a linear image-classification head over pooled features."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None) -> ImageClassifierOutputWithNoAttention:
        """Classify *pixel_values*; when *labels* is given, also compute the loss."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels / label dtype, then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 313
| 1
|
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """Post *message_body* to a Slack incoming-webhook URL.

    Raises:
        ValueError: if Slack responds with a non-200 status code.
    """
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 127
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: both module objects were assigned to one name, so the second assignment
# clobbered the logger. Give each a distinct, conventional name.
logger = logging.get_logger(__name__)

# Model identifier -> hosted config file.
MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    """Configuration for an MRA model (``model_type="mra"``).

    Stores the hyper-parameters read by the model; all arguments have the
    checkpoint defaults and are simply recorded as attributes.
    """

    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 232
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    """Holds the fixture parameters for LevitImageProcessor tests and builds
    the kwargs dict used to instantiate the processor."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # Fall back to the checkpoint-style defaults when not supplied.
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs needed to build a LevitImageProcessor for these settings."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Behaviour tests for LevitImageProcessor over PIL, numpy and torch inputs."""

    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 707
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """Abstract interface every CLI subcommand must implement.

    Fix: the two abstract methods shared one mangled name (the second silently
    shadowed the first) and the base class referenced an undefined parent;
    the class now derives from ``ABC`` with distinct method names.
    """

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command's arguments on *parser*."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
| 615
| 0
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """Score an image against free-text candidate labels with a CLIP-style model."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Split user kwargs into (preprocess, forward, postprocess) parameter dicts.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        # One hypothesis sentence per candidate label.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")
        # Highest score first.
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 436
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _UpperCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not postfix_notation:
return 0
__magic_name__ : Optional[Any] = {"+", "-", "*", "/"}
__magic_name__ : list[Any] = []
for token in postfix_notation:
if token in operations:
__magic_name__ , __magic_name__ : List[Any] = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(UpperCamelCase__ ) )
return stack.pop()
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 436
| 1
|
from __future__ import annotations
from typing import Any
class Matrix:
    """Dense row-major matrix with +, -, * (scalar and matrix), transpose and
    the Sherman-Morrison update of an inverse."""

    def __init__(self, row: int, column: int, default_value: float = 0):
        """Create a *row* x *column* matrix filled with *default_value*."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Width of the widest rendered element, for aligned printing.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Return True when *loc* is a valid in-bounds (row, column) pair."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        """Return a new matrix that is the transpose of this one."""
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix):
        """Return (A + u v^T)^-1 given this matrix as A^-1, or None when the
        update is singular (denominator 1 + v^T A^-1 u == 0)."""
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Smoke-test printing, transpose and the Sherman-Morrison update."""
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 99
|
from __future__ import annotations
import time
# Fix: all three module objects were assigned to one placeholder name, so only
# the last survived while the classes below read ``grid`` and ``delta``.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """A search-tree node: grid position, goal position and a parent link."""

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Position is stored (row, col), i.e. (y, x).
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """Uni-directional BFS over the module-level ``grid``."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        # Coordinates arrive as (y, x); Node takes (pos_x, pos_y, goal_x, goal_y, parent).
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            # Goal unreachable: report just the start position.
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return in-bounds, unblocked neighbours of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent links back to the start; return the start-to-node path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """Two simultaneous BFS searches, one from each end, meeting in the middle."""

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # Retarget each search at the other's current frontier node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # drop the meeting node so it is not duplicated
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 99
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.