| code (string, lengths 82-54.1k) | code_codestyle (int64, 0-699) | style_context (string, lengths 111-35.6k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( _A ):
a_ = (DDIMParallelScheduler,)
a_ = (("""eta""", 0.0), ("""num_inference_steps""", 50))
def lowerCamelCase_ ( self , **UpperCamelCase__ ) -> int:
"""simple docstring"""
UpperCAmelCase_ = {
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**UpperCamelCase__ )
return config
def lowerCamelCase_ ( self , **UpperCamelCase__ ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**UpperCamelCase__ )
UpperCAmelCase_ = scheduler_class(**UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = 1_0, 0.0
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for t in scheduler.timesteps:
UpperCAmelCase_ = model(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
for timesteps in [1_0_0, 5_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCamelCase__ )
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) )
def lowerCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase__ , beta_end=UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
self.check_over_configs(thresholding=UpperCamelCase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase__ , prediction_type=UpperCamelCase__ , sample_max_value=UpperCamelCase__ , )
def lowerCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
for t in [1, 1_0, 4_9]:
self.check_over_forward(time_step=UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ):
self.check_over_forward(time_step=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=UpperCamelCase__ , eta=UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**UpperCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.14771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.32460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1e-5
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = 1_0, 0.0
scheduler.set_timesteps(UpperCamelCase__ )
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
UpperCAmelCase_ = self.dummy_sample_deter + 0.1
UpperCAmelCase_ = self.dummy_sample_deter - 0.1
UpperCAmelCase_ = samplea.shape[0]
UpperCAmelCase_ = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCAmelCase_ = torch.arange(UpperCamelCase__ )[0:3, None].repeat(1 , UpperCamelCase__ )
UpperCAmelCase_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCAmelCase_ = scheduler.batch_step_no_noise(UpperCamelCase__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , UpperCamelCase__ )
UpperCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
UpperCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
UpperCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 172.0067 ) < 1e-2
assert abs(result_mean.item() - 0.223967 ) < 1e-3
def lowerCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
UpperCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 52.5302 ) < 1e-2
assert abs(result_mean.item() - 0.0684 ) < 1e-3
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=UpperCamelCase__ , beta_start=0.01 )
UpperCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
UpperCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 149.8295 ) < 1e-2
assert abs(result_mean.item() - 0.1951 ) < 1e-3
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=UpperCamelCase__ , beta_start=0.01 )
UpperCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) )
UpperCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 149.0784 ) < 1e-2
assert abs(result_mean.item() - 0.1941 ) < 1e-3
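The `_get_variance` assertions in the test above exercise the standard DDIM posterior variance. Below is a minimal sketch of that formula, assuming a linear beta schedule matching the test config; the function and variable names are illustrative, not the library's.
```python
import torch

def ddim_variance(t: int, prev_t: int) -> torch.Tensor:
    # Linear schedule from the test config: beta_start=0.0001, beta_end=0.02, 1000 steps.
    betas = torch.linspace(0.0001, 0.02, 1000)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[prev_t] if prev_t >= 0 else torch.tensor(1.0)
    # sigma_t^2 = (1 - abar_prev) / (1 - abar_t) * (1 - abar_t / abar_prev)
    return ((1 - alpha_prod_t_prev) / (1 - alpha_prod_t)) * (1 - alpha_prod_t / alpha_prod_t_prev)

print(ddim_variance(420, 400))  # should land near the 0.14771 asserted above
```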
| 660 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Union[str, Any] = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
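The `_LazyModule` on the last line defers the heavy imports declared in `_import_structure` until an attribute is first accessed. A minimal sketch of the idea, assuming nothing about transformers' actual implementation beyond the mapping it receives:
```python
import importlib
from types import ModuleType

class LazyModule(ModuleType):
    """Toy stand-in for transformers' _LazyModule: resolves attributes on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Invert {submodule: [names]} into {name: submodule}.
        self._name_to_module = {
            obj: mod for mod, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(submodule, attr)
```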
| 660 | 1 |
'''simple docstring'''
__snake_case : int = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__snake_case : Dict = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__snake_case : Optional[int] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 660 | '''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
class lowercase_ ( _A ):
a_ = """linear"""
a_ = """cosine"""
a_ = """cosine_with_restarts"""
a_ = """polynomial"""
a_ = """constant"""
a_ = """constant_with_warmup"""
a_ = """piecewise_constant"""
def lowerCamelCase__ ( A_ , A_ = -1 ):
return LambdaLR(A_ , lambda A_ : 1 , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1.0 , A_ ) )
return 1.0
return LambdaLR(A_ , A_ , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ = -1 ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = step_rules.split("," )
for rule_str in rule_list[:-1]:
UpperCAmelCase_ , UpperCAmelCase_ = rule_str.split(":" )
UpperCAmelCase_ = int(A_ )
UpperCAmelCase_ = float(A_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = float(rule_list[-1] )
def create_rules_function(A_ , A_ ):
def rule_func(A_ ) -> float:
UpperCAmelCase_ = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(A_ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
UpperCAmelCase_ = create_rules_function(A_ , A_ )
return LambdaLR(A_ , A_ , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=-1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 0.5 , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(A_ ) * 2.0 * progress )) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 1 , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(A_ ) * progress) % 1.0) )) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=1e-7 , A_=1.0 , A_=-1 ):
UpperCAmelCase_ = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
UpperCAmelCase_ = lr_init - lr_end
UpperCAmelCase_ = num_training_steps - num_warmup_steps
UpperCAmelCase_ = 1 - (current_step - num_warmup_steps) / decay_steps
UpperCAmelCase_ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(A_ , A_ , A_ )
__snake_case : str = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( A_ , A_ , A_ = None , A_ = None , A_ = None , A_ = 1 , A_ = 1.0 , A_ = -1 , ):
UpperCAmelCase_ = SchedulerType(A_ )
UpperCAmelCase_ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(A_ , last_epoch=A_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(A_ , step_rules=A_ , last_epoch=A_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(A_ , num_warmup_steps=A_ , last_epoch=A_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , num_cycles=A_ , last_epoch=A_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , power=A_ , last_epoch=A_ , )
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , last_epoch=A_ )
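Assuming the dispatcher above is the standard `get_scheduler(name, optimizer, ...)` helper (the dump renames it to `lowerCamelCase__` and its arguments to `A_`), a minimal usage sketch:
```python
import torch

model = torch.nn.Linear(8, 8)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
# `get_scheduler` stands in for the obfuscated dispatcher defined above.
scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)

for _ in range(100):
    optimizer.zero_grad()
    model(torch.randn(2, 8)).sum().backward()
    optimizer.step()
    scheduler.step()  # LambdaLR rescales the base lr by the warmup/decay factor
```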
| 660 | 1 |
'''simple docstring'''
class lowercase_ :
def __init__( self , UpperCamelCase__ ) -> None:
"""simple docstring"""
UpperCAmelCase_ = len(UpperCamelCase__ )
UpperCAmelCase_ = [0] * len_array
if len_array > 0:
UpperCAmelCase_ = array[0]
for i in range(1 , UpperCamelCase__ ):
UpperCAmelCase_ = self.prefix_sum[i - 1] + array[i]
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> bool:
"""simple docstring"""
UpperCAmelCase_ = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(UpperCamelCase__ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
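A quick usage sketch of the prefix-sum class above; `PrefixSum` stands in for the obfuscated `lowercase_`, and the method names are likewise assumed from the signatures (a two-index range sum and a target-sum membership test):
```python
ps = PrefixSum([1, 2, 3, 4])   # internal prefix sums: [1, 3, 6, 10]
assert ps.get_sum(1, 3) == 9   # 2 + 3 + 4
assert ps.contains_sum(7)      # the contiguous subarray [3, 4] sums to 7
assert not ps.contains_sum(8)  # no contiguous subarray sums to 8
```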
| 660 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case : Optional[int] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__snake_case : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 660 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class lowercase_ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0 ) -> None:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = row, column
UpperCAmelCase_ = [[default_value for c in range(UpperCamelCase__ )] for r in range(UpperCamelCase__ )]
def __str__( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = F"""Matrix consist of {self.row} rows and {self.column} columns\n"""
# Make string identifier
UpperCAmelCase_ = 0
for row_vector in self.array:
for obj in row_vector:
UpperCAmelCase_ = max(UpperCamelCase__ , len(str(UpperCamelCase__ ) ) )
UpperCAmelCase_ = F"""%{max_element_length}s"""
# Make string and return
def single_line(UpperCamelCase__ ) -> str:
nonlocal string_format_identifier
UpperCAmelCase_ = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(UpperCamelCase__ ) for row_vector in self.array )
return s
def __repr__( self ) -> str:
"""simple docstring"""
return str(self )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> bool:
"""simple docstring"""
if not (isinstance(UpperCamelCase__ , (list, tuple) ) and len(UpperCamelCase__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , UpperCamelCase__ ) -> Any:
"""simple docstring"""
assert self.validate_indicies(UpperCamelCase__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self , UpperCamelCase__ , UpperCamelCase__ ) -> None:
"""simple docstring"""
assert self.validate_indicies(UpperCamelCase__ )
UpperCAmelCase_ = value
def __add__( self , UpperCamelCase__ ) -> Matrix:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert self.row == another.row and self.column == another.column
# Add
UpperCAmelCase_ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase_ = self[r, c] + another[r, c]
return result
def __neg__( self ) -> Matrix:
"""simple docstring"""
UpperCAmelCase_ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase_ = -self[r, c]
return result
def __sub__( self , UpperCamelCase__ ) -> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self , UpperCamelCase__ ) -> Matrix:
"""simple docstring"""
if isinstance(UpperCamelCase__ , (int, float) ): # Scalar multiplication
UpperCAmelCase_ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase_ = self[r, c] * another
return result
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): # Matrix multiplication
assert self.column == another.row
UpperCAmelCase_ = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
UpperCAmelCase_ = F"""Unsupported type given for another ({type(UpperCamelCase__ )})"""
raise TypeError(UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Matrix:
"""simple docstring"""
UpperCAmelCase_ = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase_ = self[r, c]
return result
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
"""simple docstring"""
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
UpperCAmelCase_ = v.transpose()
UpperCAmelCase_ = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def lowerCamelCase__ ( ):
# a^(-1)
UpperCAmelCase_ = Matrix(3 , 3 , 0 )
for i in range(3 ):
UpperCAmelCase_ = 1
print(F"""a^(-1) is {ainv}""" )
# u, v
UpperCAmelCase_ = Matrix(3 , 1 , 0 )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1, 2, -3
UpperCAmelCase_ = Matrix(3 , 1 , 0 )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 4, -2, 5
print(F"""u is {u}""" )
print(F"""v is {v}""" )
print(F"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(A_ , A_ )}""" )
def lowerCamelCase__ ( ):
import doctest
doctest.testmod()
testa()
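The `sherman_morrison` method above applies the classical rank-one update identity. Note that `self` already holds $A^{-1}$, so no further inversion is needed, and the method returns `None` when the correction is undefined:

$$ (A + uv^{\top})^{-1} = A^{-1} - \frac{A^{-1}u\,v^{\top}A^{-1}}{1 + v^{\top}A^{-1}u}, \qquad \text{valid whenever } 1 + v^{\top}A^{-1}u \neq 0. $$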
| 660 | '''simple docstring'''
import csv
import tweepy
# Twitter API credentials
__snake_case : Union[str, Any] = ''''''
__snake_case : List[Any] = ''''''
__snake_case : List[str] = ''''''
__snake_case : Any = ''''''
def lowerCamelCase__ ( A_ ):
# authorize twitter, initialize tweepy
UpperCAmelCase_ = tweepy.OAuthHandler(A_ , A_ )
auth.set_access_token(A_ , A_ )
UpperCAmelCase_ = tweepy.API(A_ )
# initialize a list to hold all the tweepy Tweets
UpperCAmelCase_ = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCAmelCase_ = api.user_timeline(screen_name=A_ , count=200 )
# save most recent tweets
alltweets.extend(A_ )
# save the id of the oldest tweet less one
UpperCAmelCase_ = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(A_ ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
UpperCAmelCase_ = api.user_timeline(
screen_name=A_ , count=200 , max_id=A_ )
# save most recent tweets
alltweets.extend(A_ )
# update the id of the oldest tweet less one
UpperCAmelCase_ = alltweets[-1].id - 1
print(F"""...{len(A_ )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCAmelCase_ = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , "w" ) as f:
UpperCAmelCase_ = csv.writer(A_ )
writer.writerow(["id", "created_at", "text"] )
writer.writerows(A_ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 660 | 1 |
'''simple docstring'''
def lowerCamelCase__ ( A_ ):
UpperCAmelCase_ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def lowerCamelCase__ ( A_ = 100 ):
UpperCAmelCase_ = 1
UpperCAmelCase_ = 2
for i in range(2 , max_n + 1 ):
UpperCAmelCase_ = pre_numerator
UpperCAmelCase_ = 2 * i // 3 if i % 3 == 0 else 1
UpperCAmelCase_ = cur_numerator
UpperCAmelCase_ = e_cont * pre_numerator + temp
return sum_digits(A_ )
if __name__ == "__main__":
print(F'''{solution() = }''')
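The loop above walks the numerators of the convergents of the continued fraction $e = [2;\,1,2,1,1,4,1,1,6,\dots]$ (Project Euler 65); `sum_digits` then adds up the digits of the final numerator. Matching the code's indexing, with $h_0 = 1$ and $h_1 = 2$:

$$ h_n = a_n h_{n-1} + h_{n-2}, \qquad a_n = \begin{cases} 2n/3 & \text{if } 3 \mid n \\ 1 & \text{otherwise} \end{cases} \quad (n \ge 2). $$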
| 660 | '''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__snake_case : int = logging.get_logger(__name__)
class lowercase_ ( _A ):
def __init__( self , **UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
requires_backends(self , ["bs4"] )
super().__init__(**UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCAmelCase_ = parent.find_all(child.name , recursive=UpperCamelCase__ )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(UpperCamelCase__ ) else next(i for i, s in enumerate(UpperCamelCase__ , 1 ) if s is child ) )
UpperCAmelCase_ = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = BeautifulSoup(UpperCamelCase__ , "html.parser" )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for element in html_code.descendants:
if type(UpperCamelCase__ ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
UpperCAmelCase_ = html.unescape(UpperCamelCase__ ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = self.xpath_soup(UpperCamelCase__ )
stringaxtag_seq.append(UpperCamelCase__ )
stringaxsubs_seq.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("Number of doc strings and xtags does not correspond" )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("Number of doc strings and xsubs does not correspond" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = ""
for tagname, subs in zip(UpperCamelCase__ , UpperCamelCase__ ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self , UpperCamelCase__ ) -> BatchFeature:
"""simple docstring"""
UpperCAmelCase_ = False
# Check that strings has a valid type
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = True
elif isinstance(UpperCamelCase__ , (list, tuple) ):
if len(UpperCamelCase__ ) == 0 or isinstance(html_strings[0] , UpperCamelCase__ ):
UpperCAmelCase_ = True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `List[str]` (batch of examples), "
F"""but is of type {type(UpperCamelCase__ )}.""" )
UpperCAmelCase_ = bool(isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(html_strings[0] , UpperCamelCase__ )) )
if not is_batched:
UpperCAmelCase_ = [html_strings]
# Get nodes + xpaths
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for html_string in html_strings:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.get_three_from_single(UpperCamelCase__ )
nodes.append(UpperCamelCase__ )
UpperCAmelCase_ = []
for node, tag_list, sub_list in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = self.construct_xpath(UpperCamelCase__ , UpperCamelCase__ )
xpath_strings.append(UpperCamelCase__ )
xpaths.append(UpperCamelCase__ )
# return as Dict
UpperCAmelCase_ = {"nodes": nodes, "xpaths": xpaths}
UpperCAmelCase_ = BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
return encoded_inputs
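Assuming the class above is HuggingFace's `MarkupLMFeatureExtractor` under obfuscated names, a hypothetical usage sketch of its nodes/xpaths output:
```python
html = "<html><body><p>Hello world</p><p>Bye</p></body></html>"
fe = MarkupLMFeatureExtractor()  # class name assumed; the dump renames it to `lowercase_`
enc = fe(html)
print(enc["nodes"])   # [['Hello world', 'Bye']]
print(enc["xpaths"])  # [['/html/body/p[1]', '/html/body/p[2]']]
```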
| 660 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Any = logging.get_logger(__name__)
__snake_case : Dict = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class lowercase_ ( _A , _A ):
a_ = """resnet"""
a_ = ["""basic""", """bottleneck"""]
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=6_4 , UpperCamelCase__=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , UpperCamelCase__=[3, 4, 6, 3] , UpperCamelCase__="bottleneck" , UpperCamelCase__="relu" , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> int:
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embedding_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = layer_type
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = downsample_in_first_stage
UpperCAmelCase_ = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(UpperCamelCase__ ) + 1 )]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=UpperCamelCase__ , out_indices=UpperCamelCase__ , stage_names=self.stage_names )
class lowercase_ ( _A ):
a_ = version.parse("""1.11""" )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
"""simple docstring"""
return 1e-3
| 660 | '''simple docstring'''
def lowerCamelCase__ ( A_ , A_ ):
_validate_point(A_ )
_validate_point(A_ )
if len(A_ ) != len(A_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(A_ , A_ ) ) )
def lowerCamelCase__ ( A_ ):
if point:
if isinstance(A_ , A_ ):
for item in point:
if not isinstance(A_ , (int, float) ):
UpperCAmelCase_ = (
"Expected a list of numbers as input, found "
F"""{type(A_ ).__name__}"""
)
raise TypeError(A_ )
else:
UpperCAmelCase_ = F"""Expected a list of numbers as input, found {type(A_ ).__name__}"""
raise TypeError(A_ )
else:
raise ValueError("Missing an input" )
def lowerCamelCase__ ( A_ , A_ ):
_validate_point(A_ )
_validate_point(A_ )
if len(A_ ) != len(A_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(A_ , A_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
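Both top-level functions above compute the same Manhattan (L1) distance, once with an explicit generator and once as a one-liner over `zip`; a quick usage sketch with assumed names (the dump renames both to `lowerCamelCase__`):
```python
assert manhattan_distance([1, 1], [9, 9]) == 16.0            # |1-9| + |1-9|
assert manhattan_distance_one_liner([1, 1], [2, 2]) == 2.0   # |1-2| + |1-2|
```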
| 660 | 1 |
'''simple docstring'''
def lowerCamelCase__ ( A_ , A_ ):
UpperCAmelCase_ = ""
for word_or_phrase in separated:
if not isinstance(A_ , A_ ):
raise Exception("join() accepts only strings to be joined" )
joined += word_or_phrase + separator
return joined.strip(A_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 660 | '''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__snake_case : Optional[int] = logging.getLogger(__name__)
def lowerCamelCase__ ( A_ , A_ ):
# save results
if os.path.exists(A_ ):
if os.path.exists(os.path.join(A_ , "config.json" ) ) and os.path.isfile(
os.path.join(A_ , "config.json" ) ):
os.remove(os.path.join(A_ , "config.json" ) )
if os.path.exists(os.path.join(A_ , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(A_ , "pytorch_model.bin" ) ):
os.remove(os.path.join(A_ , "pytorch_model.bin" ) )
else:
os.makedirs(A_ )
model.save_pretrained(A_ )
def lowerCamelCase__ ( A_ , A_=False ):
UpperCAmelCase_ = 2
if unlogit:
UpperCAmelCase_ = torch.pow(A_ , A_ )
UpperCAmelCase_ = p * torch.log(A_ )
UpperCAmelCase_ = 0
return -plogp.sum(dim=-1 )
def lowerCamelCase__ ( A_ ):
logger.info("lv, h >\t" + "\t".join(F"""{x + 1}""" for x in range(len(A_ ) ) ) )
for row in range(len(A_ ) ):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=True , A_=True , A_=None , A_=False ):
UpperCAmelCase_ , UpperCAmelCase_ = model.config.num_hidden_layers, model.config.num_attention_heads
UpperCAmelCase_ = torch.zeros(A_ , A_ ).to(args.device )
UpperCAmelCase_ = torch.zeros(A_ , A_ ).to(args.device )
if head_mask is None:
UpperCAmelCase_ = torch.ones(A_ , A_ ).to(args.device )
head_mask.requires_grad_(requires_grad=A_ )
# If the attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
UpperCAmelCase_ = None
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = 0.0
for step, inputs in enumerate(tqdm(A_ , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
UpperCAmelCase_ = tuple(t.to(args.device ) for t in inputs )
((UpperCAmelCase_) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
UpperCAmelCase_ = model(A_ , labels=A_ , head_mask=A_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(A_ ):
UpperCAmelCase_ = entropy(attn.detach() , A_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(A_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
UpperCAmelCase_ = 2
UpperCAmelCase_ = torch.pow(torch.pow(A_ , A_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
UpperCAmelCase_ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(A_ )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(A_ )
logger.info("Head ranked by importance scores" )
UpperCAmelCase_ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
UpperCAmelCase_ = torch.arange(
head_importance.numel() , device=args.device )
UpperCAmelCase_ = head_ranks.view_as(A_ )
print_ad_tensor(A_ )
return attn_entropy, head_importance, total_loss
def lowerCamelCase__ ( A_ , A_ , A_ ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(A_ , A_ , A_ , compute_entropy=A_ )
UpperCAmelCase_ = 1 / loss # instead of downstream score use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , A_ , original_score * args.masking_threshold )
UpperCAmelCase_ = torch.ones_like(A_ )
UpperCAmelCase_ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
UpperCAmelCase_ = original_score
while current_score >= original_score * args.masking_threshold:
UpperCAmelCase_ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
UpperCAmelCase_ = float("Inf" )
UpperCAmelCase_ = head_importance.view(-1 ).sort()[1]
if len(A_ ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
UpperCAmelCase_ = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
UpperCAmelCase_ = new_head_mask.view(-1 )
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = new_head_mask.view_as(A_ )
UpperCAmelCase_ = new_head_mask.clone().detach()
print_ad_tensor(A_ )
# Compute metric and head importance again
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , head_mask=A_ )
UpperCAmelCase_ = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , A_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(A_ )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCamelCase__ ( A_ , A_ , A_ , A_ ):
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(A_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(A_ , A_ ):
UpperCAmelCase_ = [
v,
]
assert sum(len(A_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(A_ )
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ , actually_pruned=A_ , )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , A_ , A_ , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , A_ , A_ )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(A_ , args.output_dir )
def lowerCamelCase__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=A_ , type=A_ , required=A_ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=A_ , type=A_ , required=A_ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=A_ , type=A_ , required=A_ , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=A_ , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=A_ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=A_ , type=A_ , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=A_ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=A_ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=A_ , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=A_ , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=A_ , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=A_ , help="Batch size." )
parser.add_argument("--seed" , type=A_ , default=42 )
parser.add_argument("--local_rank" , type=A_ , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=A_ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=A_ , default="" , help="Can be used for distant debugging." )
UpperCAmelCase_ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
UpperCAmelCase_ = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
UpperCAmelCase_ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
UpperCAmelCase_ = torch.device("cuda" , args.local_rank )
UpperCAmelCase_ = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
UpperCAmelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
UpperCAmelCase_ = nn.parallel.DistributedDataParallel(
A_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=A_ )
elif args.n_gpu > 1:
UpperCAmelCase_ = nn.DataParallel(A_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=A_ )
torch.save(A_ , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , A_ )
# Prepare dataset
UpperCAmelCase_ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
UpperCAmelCase_ = (torch.from_numpy(A_ ),)
UpperCAmelCase_ = TensorDataset(*A_ )
UpperCAmelCase_ = RandomSampler(A_ )
UpperCAmelCase_ = DataLoader(A_ , sampler=A_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(A_ , A_ , A_ )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCAmelCase_ = mask_heads(A_ , A_ , A_ )
prune_heads(A_ , A_ , A_ , A_ )
if __name__ == "__main__":
main()
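For reference, the `entropy` helper at the top of this script scores each attention head by the entropy of its attention distribution, with zero entries masked so that $0\log 0$ contributes nothing (presumably `plogp[p == 0] = 0` behind the renamed assignment), and with $p$ squared first when `unlogit` is set:

$$ H(p) = -\sum_i p_i \log p_i $$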
| 660 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class lowercase_ ( _A ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
@torch.no_grad()
def __call__( self , UpperCamelCase__ = 1 , UpperCamelCase__ = None , UpperCamelCase__ = 5_0 , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
UpperCAmelCase_ = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=UpperCamelCase__ , )
UpperCAmelCase_ = image.to(self.device )
# set step values
self.scheduler.set_timesteps(UpperCamelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase_ = self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCAmelCase_ = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
UpperCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=UpperCamelCase__ ), "This is a local test"
| 660 | '''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case : str = logging.getLogger(__name__)
def lowerCamelCase__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=A_ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=A_ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=A_ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=A_ , default="data/dump" , help="The dump file prefix." )
UpperCAmelCase_ = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
UpperCAmelCase_ = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase_ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase_ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase_ = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCAmelCase_ = fp.readlines()
logger.info("Start encoding" )
logger.info(F"""{len(A_ )} examples to process.""" )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
UpperCAmelCase_ = 10_000
UpperCAmelCase_ = time.time()
for text in data:
UpperCAmelCase_ = F"""{bos} {text.strip()} {sep}"""
UpperCAmelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
rslt.append(A_ )
iter += 1
if iter % interval == 0:
UpperCAmelCase_ = time.time()
logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
UpperCAmelCase_ = time.time()
logger.info("Finished binarization" )
logger.info(F"""{len(A_ )} examples processed.""" )
UpperCAmelCase_ = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
UpperCAmelCase_ = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCAmelCase_ = [np.uintaa(A_ ) for d in rslt]
else:
UpperCAmelCase_ = [np.intaa(A_ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(A_ , "wb" ) as handle:
pickle.dump(rslt_ , A_ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
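The dtype switch near the end is a size optimization: token ids fit in 16 bits exactly when the vocabulary has fewer than $2^{16} = 65536$ entries, halving the pickle relative to 32-bit ids. A minimal sketch of the same check (values illustrative):
```python
import numpy as np

vocab_size = 30522                 # e.g. bert-base-uncased
ids = [101, 7592, 2088, 102]
dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
arr = np.array(ids, dtype=dtype)   # 2 bytes per token instead of 4
```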
| 660 | 1 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__snake_case : Optional[Any] = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__snake_case : Any = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def lowerCamelCase__ ( A_ , A_ , A_ ):
UpperCAmelCase_ = SavedModel()
UpperCAmelCase_ = []
with open(os.path.join(A_ , "utils" , "tf_ops" , "onnx.json" ) ) as f:
UpperCAmelCase_ = json.load(A_ )["opsets"]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(A_ )] )
with open(A_ , "rb" ) as f:
saved_model.ParseFromString(f.read() )
UpperCAmelCase_ = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
UpperCAmelCase_ = sorted(A_ )
UpperCAmelCase_ = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(A_ )
if strict and len(A_ ) > 0:
raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops )
elif len(A_ ) > 0:
print(F"""Found the following incompatible ops for the opset {opset}:""" )
print(*A_ , sep="\n" )
else:
print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
__snake_case : str = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
__snake_case : List[Any] = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
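A hedged sketch of calling the checker directly, bypassing argparse; the name `onnx_compliancy` is taken from the `__main__` call above, and the positional order `(saved_model_path, strict, opset)` follows the argparse wiring. The path is illustrative:
```python
# Prints the ops incompatible with ONNX opset 12, or a success message;
# raises only when strict checking is requested.
onnx_compliancy("my_model/saved_model.pb", False, 12)
```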
| 660 | '''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__snake_case : Dict = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__snake_case : str = json.load(f)
@require_torch
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Dict:
"""simple docstring"""
return FSMTTokenizer.from_pretrained(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = FSMTForConditionalGeneration.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = F"""facebook/wmt19-{pair}"""
UpperCAmelCase_ = self.get_tokenizer(UpperCamelCase__ )
UpperCAmelCase_ = self.get_model(UpperCamelCase__ )
UpperCAmelCase_ = bleu_data[pair]["src"]
UpperCAmelCase_ = bleu_data[pair]["tgt"]
UpperCAmelCase_ = tokenizer(UpperCamelCase__ , return_tensors="pt" , truncation=UpperCamelCase__ , padding="longest" ).to(UpperCamelCase__ )
UpperCAmelCase_ = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase_ = tokenizer.batch_decode(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
UpperCAmelCase_ = calculate_bleu(UpperCamelCase__ , UpperCamelCase__ )
print(UpperCamelCase__ )
self.assertGreaterEqual(scores["bleu"] , UpperCamelCase__ )
| 660 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Dict:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
UpperCAmelCase_ = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = "sshleifer/tiny-gpt2"
UpperCAmelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
UpperCAmelCase_ = TensorFlowBenchmark(UpperCamelCase__ )
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = "sgugger/tiny-distilbert-classification"
UpperCAmelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , only_pretrain_model=UpperCamelCase__ , )
UpperCAmelCase_ = TensorFlowBenchmark(UpperCamelCase__ )
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = "sshleifer/tiny-gpt2"
UpperCAmelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
UpperCAmelCase_ = TensorFlowBenchmark(UpperCamelCase__ )
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = "sshleifer/tiny-gpt2"
UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
UpperCAmelCase_ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = "sshleifer/tiny-gpt2"
UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
UpperCAmelCase_ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = "sshleifer/tiny-gpt2"
UpperCAmelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
UpperCAmelCase_ = TensorFlowBenchmark(UpperCamelCase__ )
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = "sshleifer/tiny-gpt2"
UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
UpperCAmelCase_ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = "patrickvonplaten/t5-tiny-random"
UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
UpperCAmelCase_ = TensorFlowBenchmark(UpperCamelCase__ , configs=[config] )
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = "sshleifer/tiny-gpt2"
UpperCAmelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
UpperCAmelCase_ = TensorFlowBenchmark(UpperCamelCase__ )
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCamelCase__ , save_to_csv=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCamelCase__ , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(UpperCamelCase__ , "inf_mem.csv" ) , env_info_csv_file=os.path.join(UpperCamelCase__ , "env.csv" ) , multi_process=UpperCamelCase__ , )
UpperCAmelCase_ = TensorFlowBenchmark(UpperCamelCase__ )
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "env.csv" ) ).exists() )
def lowerCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(UpperCamelCase__ ):
self.assertTrue(hasattr(UpperCamelCase__ , "sequential" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "cumulative" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "current" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCamelCase__ , "log.txt" ) , log_print=UpperCamelCase__ , trace_memory_line_by_line=UpperCamelCase__ , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
UpperCAmelCase_ = TensorFlowBenchmark(UpperCamelCase__ )
UpperCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "log.txt" ) ).exists() )
| 660 | '''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__snake_case : List[Any] = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : Optional[int] = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : int = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : Dict = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__snake_case : Tuple = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__snake_case : str = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
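# argparse helper: interpret "yes"/"true"-style strings as booleans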
def lowerCamelCase__ ( A_ ):
if isinstance(A_ , A_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
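# copy one ResNet block's in/emb/out layer weights (plus optional skip connection) out of the original checkpoint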
def lowerCamelCase__ ( A_ , A_ , A_ , A_ , A_=False ):
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
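# split the fused qkv projection of an attention block into separate q/k/v tensors and squeeze the 1x1-conv dims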
def lowerCamelCase__ ( A_ , A_ , A_ , A_ , A_=None ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.bias"""]
UpperCAmelCase_ = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
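# walk the original consistency-model UNet checkpoint block by block and rebuild it in the diffusers layout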
def lowerCamelCase__ ( A_ , A_ ):
UpperCAmelCase_ = torch.load(A_ , map_location="cpu" )
UpperCAmelCase_ = {}
UpperCAmelCase_ = checkpoint["time_embed.0.weight"]
UpperCAmelCase_ = checkpoint["time_embed.0.bias"]
UpperCAmelCase_ = checkpoint["time_embed.2.weight"]
UpperCAmelCase_ = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
UpperCAmelCase_ = checkpoint["label_emb.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.bias"]
UpperCAmelCase_ = unet_config["down_block_types"]
UpperCAmelCase_ = unet_config["layers_per_block"]
UpperCAmelCase_ = unet_config["attention_head_dim"]
UpperCAmelCase_ = unet_config["block_out_channels"]
UpperCAmelCase_ = 1
UpperCAmelCase_ = channels_list[0]
for i, layer_type in enumerate(A_ ):
UpperCAmelCase_ = channels_list[i]
UpperCAmelCase_ = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(A_ ):
UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(A_ ):
UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
UpperCAmelCase_ = F"""down_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
A_ , A_ , A_ , A_ , A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""down_blocks.{i}.downsamplers.0"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
current_layer += 1
UpperCAmelCase_ = current_channels
# hardcoded the mid-block for now
UpperCAmelCase_ = "mid_block.resnets.0"
UpperCAmelCase_ = "middle_block.0"
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = "mid_block.attentions.0"
UpperCAmelCase_ = "middle_block.1"
UpperCAmelCase_ = convert_attention(A_ , A_ , A_ , A_ , A_ )
UpperCAmelCase_ = "mid_block.resnets.1"
UpperCAmelCase_ = "middle_block.2"
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = 0
UpperCAmelCase_ = unet_config["up_block_types"]
for i, layer_type in enumerate(A_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.1"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
UpperCAmelCase_ = F"""up_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
A_ , A_ , A_ , A_ , A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.2"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = checkpoint["out.0.weight"]
UpperCAmelCase_ = checkpoint["out.0.bias"]
UpperCAmelCase_ = checkpoint["out.2.weight"]
UpperCAmelCase_ = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
__snake_case : List[str] = parser.parse_args()
__snake_case : Any = strabool(args.class_cond)
__snake_case : List[str] = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
__snake_case : Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__snake_case : List[str] = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
__snake_case : Optional[Any] = None
__snake_case : Optional[int] = con_pt_to_diffuser(args.unet_path, unet_config)
__snake_case : str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__snake_case : Tuple = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__snake_case : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
__snake_case : Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config)
__snake_case : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 660 | 1 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
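# build text/vision configs sized for the checkpoint named by model_name (patch size is parsed from the name)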
def lowerCamelCase__ ( A_ , A_ ):
UpperCAmelCase_ = XCLIPTextConfig()
# derive patch size from model name
UpperCAmelCase_ = model_name.find("patch" )
UpperCAmelCase_ = int(model_name[start_idx + len("patch" ) : start_idx + len("patch" ) + 2] )
UpperCAmelCase_ = XCLIPVisionConfig(patch_size=A_ , num_frames=A_ )
if "large" in model_name:
UpperCAmelCase_ = 768
UpperCAmelCase_ = 3_072
UpperCAmelCase_ = 12
UpperCAmelCase_ = 1_024
UpperCAmelCase_ = 4_096
UpperCAmelCase_ = 16
UpperCAmelCase_ = 24
UpperCAmelCase_ = 768
UpperCAmelCase_ = 3_072
if model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase_ = 336
UpperCAmelCase_ = XCLIPConfig.from_text_vision_configs(A_ , A_ )
if "large" in model_name:
UpperCAmelCase_ = 768
return config
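# translate an original X-CLIP parameter name into its transformers equivalent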
def lowerCamelCase__ ( A_ ):
# text encoder
if name == "token_embedding.weight":
UpperCAmelCase_ = name.replace("token_embedding.weight" , "text_model.embeddings.token_embedding.weight" )
if name == "positional_embedding":
UpperCAmelCase_ = name.replace("positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "ln_1" in name:
UpperCAmelCase_ = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
UpperCAmelCase_ = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
UpperCAmelCase_ = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
UpperCAmelCase_ = name.replace("c_proj" , "fc2" )
if name.startswith("transformer.resblocks" ):
UpperCAmelCase_ = name.replace("transformer.resblocks" , "text_model.encoder.layers" )
if "attn.out_proj" in name and "message" not in name:
UpperCAmelCase_ = name.replace("attn.out_proj" , "self_attn.out_proj" )
if "ln_final" in name:
UpperCAmelCase_ = name.replace("ln_final" , "text_model.final_layer_norm" )
# visual encoder
if name == "visual.class_embedding":
UpperCAmelCase_ = name.replace("visual.class_embedding" , "vision_model.embeddings.class_embedding" )
if name == "visual.positional_embedding":
UpperCAmelCase_ = name.replace("visual.positional_embedding" , "vision_model.embeddings.position_embedding.weight" )
if name.startswith("visual.transformer.resblocks" ):
UpperCAmelCase_ = name.replace("visual.transformer.resblocks" , "vision_model.encoder.layers" )
if "visual.conv1" in name:
UpperCAmelCase_ = name.replace("visual.conv1" , "vision_model.embeddings.patch_embedding" )
if "visual.ln_pre" in name:
UpperCAmelCase_ = name.replace("visual.ln_pre" , "vision_model.pre_layernorm" )
if "visual.ln_post" in name:
UpperCAmelCase_ = name.replace("visual.ln_post" , "vision_model.post_layernorm" )
if "visual.proj" in name:
UpperCAmelCase_ = name.replace("visual.proj" , "visual_projection.weight" )
if "text_projection" in name:
UpperCAmelCase_ = name.replace("text_projection" , "text_projection.weight" )
# things on top
if "prompts_visual_proj" in name:
UpperCAmelCase_ = name.replace("prompts_visual_proj" , "prompts_visual_projection" )
if "prompts_visual_ln" in name:
UpperCAmelCase_ = name.replace("prompts_visual_ln" , "prompts_visual_layernorm" )
# mit
if name == "mit.positional_embedding":
UpperCAmelCase_ = name.replace("positional" , "position" )
if name.startswith("mit.resblocks" ):
UpperCAmelCase_ = name.replace("mit.resblocks" , "mit.encoder.layers" )
# prompts generator
if name.startswith("prompts_generator.norm" ):
UpperCAmelCase_ = name.replace("prompts_generator.norm" , "prompts_generator.layernorm" )
return name
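# rename every key and split each fused attention in_proj matrix into q/k/v thirds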
def lowerCamelCase__ ( A_ , A_ ):
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ = orig_state_dict.pop(A_ )
if "attn.in_proj" in key:
UpperCAmelCase_ = key.split("." )
if key.startswith("visual" ):
UpperCAmelCase_ = key_split[3]
UpperCAmelCase_ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase_ = val[
:dim, :
]
UpperCAmelCase_ = val[
dim : dim * 2, :
]
UpperCAmelCase_ = val[
-dim:, :
]
else:
UpperCAmelCase_ = val[
:dim
]
UpperCAmelCase_ = val[
dim : dim * 2
]
UpperCAmelCase_ = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase_ = val[
:dim, :
]
UpperCAmelCase_ = val[
dim : dim * 2, :
]
UpperCAmelCase_ = val[
-dim:, :
]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[
dim : dim * 2
]
UpperCAmelCase_ = val[-dim:]
elif key.startswith("mit" ):
UpperCAmelCase_ = key_split[2]
UpperCAmelCase_ = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[dim : dim * 2, :]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
else:
UpperCAmelCase_ = key_split[2]
UpperCAmelCase_ = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[dim : dim * 2, :]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
else:
UpperCAmelCase_ = rename_key(A_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCAmelCase_ = val.T
UpperCAmelCase_ = val
return orig_state_dict
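# fetch a sample "eating spaghetti" clip with the requested number of frames from the hub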
def lowerCamelCase__ ( A_ ):
if num_frames == 8:
UpperCAmelCase_ = "eating_spaghetti_8_frames.npy"
elif num_frames == 16:
UpperCAmelCase_ = "eating_spaghetti.npy"
elif num_frames == 32:
UpperCAmelCase_ = "eating_spaghetti_32_frames.npy"
UpperCAmelCase_ = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename=A_ , repo_type="dataset" , )
UpperCAmelCase_ = np.load(A_ )
return list(A_ )
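# download the original checkpoint, convert it, and sanity-check the logits against reference values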
def lowerCamelCase__ ( A_ , A_=None , A_=False ):
UpperCAmelCase_ = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
UpperCAmelCase_ = model_to_url[model_name]
UpperCAmelCase_ = 8
if "16-frames" in model_name:
UpperCAmelCase_ = 16
elif "shot" in model_name:
UpperCAmelCase_ = 32
UpperCAmelCase_ = get_xclip_config(A_ , A_ )
UpperCAmelCase_ = XCLIPModel(A_ )
model.eval()
if "drive" in checkpoint_url:
UpperCAmelCase_ = "pytorch_model.bin"
gdown.cached_download(A_ , A_ , quiet=A_ )
UpperCAmelCase_ = torch.load(A_ , map_location="cpu" )["model"]
else:
UpperCAmelCase_ = torch.hub.load_state_dict_from_url(A_ )["model"]
UpperCAmelCase_ = convert_state_dict(A_ , A_ )
UpperCAmelCase_ = XCLIPModel(A_ )
UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(A_ , strict=A_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCAmelCase_ = 336 if model_name == "xclip-large-patch14-16-frames" else 224
UpperCAmelCase_ = VideoMAEImageProcessor(size=A_ )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32" )
UpperCAmelCase_ = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32" )
UpperCAmelCase_ = XCLIPProcessor(image_processor=A_ , tokenizer=A_ )
UpperCAmelCase_ = prepare_video(A_ )
UpperCAmelCase_ = processor(
text=["playing sports", "eating spaghetti", "go shopping"] , videos=A_ , return_tensors="pt" , padding=A_ )
print("Shape of pixel values:" , inputs.pixel_values.shape )
with torch.no_grad():
UpperCAmelCase_ = model(**A_ )
# Verify outputs
UpperCAmelCase_ = outputs.logits_per_video
UpperCAmelCase_ = logits_per_video.softmax(dim=1 )
print("Probs:" , A_ )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase_ = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase_ = torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase_ = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase_ = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase_ = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase_ = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase_ = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase_ = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase_ = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase_ = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase_ = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase_ = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase_ = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase_ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase_ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase_ = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase_ = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase_ = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
assert torch.allclose(A_ , A_ , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A_ )
if push_to_hub:
print("Pushing model, processor and slow tokenizer files to the hub..." )
model.push_to_hub(A_ , organization="nielsr" )
processor.push_to_hub(A_ , organization="nielsr" )
slow_tokenizer.push_to_hub(A_ , organization="nielsr" )
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case : Dict = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 660 | '''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
__snake_case : Any = _symbol_database.Default()
__snake_case : Dict = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
__snake_case : Union[str, Any] = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
__snake_case : Any = None
__snake_case : Dict = B'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
__snake_case : Union[str, Any] = 45
__snake_case : str = 15_81
__snake_case : Optional[int] = 15_17
__snake_case : Optional[Any] = 15_70
__snake_case : Union[str, Any] = 15_84
__snake_case : Any = 17_93
__snake_case : Optional[int] = 17_95
__snake_case : Tuple = 19_16
__snake_case : int = 18_64
__snake_case : Any = 19_05
__snake_case : Optional[int] = 19_19
__snake_case : str = 24_29
__snake_case : Tuple = 22_08
__snake_case : str = 24_18
__snake_case : Tuple = 23_23
__snake_case : Optional[int] = 24_07
# @@protoc_insertion_point(module_scope)
| 660 | 1 |
'''simple docstring'''
from timeit import timeit
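# Brian Kernighan's algorithm: number &= number - 1 clears the lowest set bit on every pass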
def lowerCamelCase__ ( A_ ):
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase_ = 0
while number:
number &= number - 1
result += 1
return result
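# naive popcount: test the lowest bit with the modulo operator, then shift right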
def lowerCamelCase__ ( A_ ):
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase_ = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def lowerCamelCase__ ( ):
def do_benchmark(A_ ) -> None:
UpperCAmelCase_ = "import __main__ as z"
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(A_ ) = }""" )
UpperCAmelCase_ = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=A_ )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(A_ ) = }""" )
UpperCAmelCase_ = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=A_ , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(A_ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 660 | '''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase_ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("xlm-roberta-base" )
UpperCAmelCase_ = "The dog is cute and lives in the garden house"
UpperCAmelCase_ = jnp.array([tokenizer.encode(UpperCamelCase__ )] )
UpperCAmelCase_ = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
UpperCAmelCase_ = model(UpperCamelCase__ )["last_hidden_state"]
self.assertEqual(output.shape , UpperCamelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase__ , atol=1e-3 ) )
| 660 | 1 |
'''simple docstring'''
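# return the first n terms of the harmonic series as strings: "1", "1/2", ..., "1/n"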
def lowerCamelCase__ ( A_ ):
if n_term == "":
return []
UpperCAmelCase_ = []
for temp in range(int(A_ ) ):
series.append(F"""1/{temp + 1}""" if series else "1" )
return series
if __name__ == "__main__":
__snake_case : Tuple = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 660 | '''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
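# build a GPT-2 model from config, load the TensorFlow checkpoint weights, and save it in PyTorch format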
def lowerCamelCase__ ( A_ , A_ , A_ ):
# Construct model
if gpta_config_file == "":
UpperCAmelCase_ = GPTaConfig()
else:
UpperCAmelCase_ = GPTaConfig.from_json_file(A_ )
UpperCAmelCase_ = GPTaModel(A_ )
# Load weights from numpy
load_tf_weights_in_gpta(A_ , A_ , A_ )
# Save pytorch-model
UpperCAmelCase_ = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
UpperCAmelCase_ = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , A_ )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(A_ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
__snake_case : Dict = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 660 | 1 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__snake_case : Optional[Any] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowercase_ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=1_6 , UpperCamelCase__=1_3 , UpperCamelCase__=7 , UpperCamelCase__=1_4 , UpperCamelCase__=1_0 , UpperCamelCase__=1_9 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=True , UpperCamelCase__=1_6 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=[1, 2, 3, 4, 5] , UpperCamelCase__=2_5 , UpperCamelCase__=5 , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = d_model
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = prediction_length
UpperCAmelCase_ = context_length
UpperCAmelCase_ = cardinality
UpperCAmelCase_ = num_time_features
UpperCAmelCase_ = lags_sequence
UpperCAmelCase_ = embedding_dimension
UpperCAmelCase_ = is_training
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = context_length
UpperCAmelCase_ = prediction_length + label_length
UpperCAmelCase_ = label_length
UpperCAmelCase_ = moving_average
UpperCAmelCase_ = autocorrelation_factor
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = config.context_length + max(config.lags_sequence )
UpperCAmelCase_ = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
UpperCAmelCase_ = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
UpperCAmelCase_ = floats_tensor([self.batch_size, _past_length] )
UpperCAmelCase_ = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
UpperCAmelCase_ = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
UpperCAmelCase_ = floats_tensor([self.batch_size, config.prediction_length] )
UpperCAmelCase_ = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.prepare_autoformer_inputs_dict(UpperCamelCase__ )
return config, inputs_dict
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel(config=UpperCamelCase__ ).to(UpperCamelCase__ ).eval()
UpperCAmelCase_ = model(**UpperCamelCase__ )
UpperCAmelCase_ = outputs.encoder_last_hidden_state
UpperCAmelCase_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = model.get_encoder()
encoder.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = AutoformerEncoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = model.create_network_inputs(**UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
UpperCAmelCase_ = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
UpperCAmelCase_ = encoder(inputs_embeds=UpperCamelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
UpperCAmelCase_ = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
UpperCAmelCase_ = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
UpperCAmelCase_ = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
UpperCAmelCase_ = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = model.get_decoder()
decoder.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = AutoformerDecoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
UpperCAmelCase_ = decoder(
trend=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowercase_ ( _A , _A , unittest.TestCase ):
a_ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a_ = (AutoformerForPrediction,) if is_torch_available() else ()
a_ = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = model_class.from_pretrained(UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase__ )
@unittest.skip(reason="Model has no token embeddings" )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = inspect.signature(getattr(UpperCamelCase__ , "forward" ) )
# The main input is the name of the argument after `self`
UpperCAmelCase_ = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
UpperCAmelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "d_model" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "num_attention_heads" , UpperCamelCase__ )
UpperCAmelCase_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
UpperCAmelCase_ = len(UpperCamelCase__ )
UpperCAmelCase_ = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# decoder attentions
UpperCAmelCase_ = outputs.decoder_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
UpperCAmelCase_ = outputs.cross_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
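# fetch a cached batch of the tourism-monthly dataset for the integration tests below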
def lowerCamelCase__ ( A_="train-batch.pt" ):
UpperCAmelCase_ = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=A_ , repo_type="dataset" )
UpperCAmelCase_ = torch.load(A_ , map_location=A_ )
return batch
@require_torch
@slow
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch()
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
UpperCAmelCase_ = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
UpperCAmelCase_ = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
UpperCAmelCase_ = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCamelCase__ )
UpperCAmelCase_ = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase__ , rtol=1e-1 ) )
| 660 | '''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
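# the decorators below tag a method with the key(s) it handles; KeyHandler's metaclass collects the tags into key_handler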
def lowerCamelCase__ ( A_ ):
def decorator(A_ ):
UpperCAmelCase_ = getattr(A_ , "handle_key" , [] )
handle += [key]
setattr(A_ , "handle_key" , A_ )
return func
return decorator
def lowerCamelCase__ ( *A_ ):
def decorator(A_ ):
UpperCAmelCase_ = getattr(A_ , "handle_key" , [] )
handle += keys
setattr(A_ , "handle_key" , A_ )
return func
return decorator
class lowercase_ ( _A ):
def __new__( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = super().__new__(cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not hasattr(UpperCamelCase__ , "key_handler" ):
setattr(UpperCamelCase__ , "key_handler" , {} )
setattr(UpperCamelCase__ , "handle_input" , KeyHandler.handle_input )
for value in attrs.values():
UpperCAmelCase_ = getattr(UpperCamelCase__ , "handle_key" , [] )
for key in handled_keys:
UpperCAmelCase_ = value
return new_cls
@staticmethod
def lowerCamelCase_ ( cls ) -> str:
"""simple docstring"""
UpperCAmelCase_ = get_character()
if char != KEYMAP["undefined"]:
UpperCAmelCase_ = ord(UpperCamelCase__ )
UpperCAmelCase_ = cls.key_handler.get(UpperCamelCase__ )
if handler:
UpperCAmelCase_ = char
return handler(cls )
else:
return None
def lowerCamelCase__ ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 660 | 1 |
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__snake_case : int = logging.get_logger(__name__)
class lowercase_ ( _A ):
def __init__( self , **UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
requires_backends(self , ["bs4"] )
super().__init__(**UpperCamelCase__ )
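# walk up the parse tree, recording each ancestor's tag name and its index among same-named siblings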
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCAmelCase_ = parent.find_all(child.name , recursive=UpperCamelCase__ )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(UpperCamelCase__ ) else next(i for i, s in enumerate(UpperCamelCase__ , 1 ) if s is child ) )
UpperCAmelCase_ = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = BeautifulSoup(UpperCamelCase__ , "html.parser" )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for element in html_code.descendants:
if type(UpperCamelCase__ ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
UpperCAmelCase_ = html.unescape(UpperCamelCase__ ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = self.xpath_soup(UpperCamelCase__ )
stringaxtag_seq.append(UpperCamelCase__ )
stringaxsubs_seq.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("Number of doc strings and xtags does not correspond" )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("Number of doc strings and xsubs does not correspond" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = ""
for tagname, subs in zip(UpperCamelCase__ , UpperCamelCase__ ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
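# Worked example (illustrative): for xpath_tags ["html", "body", "div"] and
# xpath_subscripts [0, 0, 1], the loop above yields "/html/body/div[1]"; a
# subscript of 0 (a unique child) adds no "[n]" suffix.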
def __call__( self , UpperCamelCase__ ) -> BatchFeature:
"""simple docstring"""
UpperCAmelCase_ = False
# Check that the input strings have a valid type
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = True
elif isinstance(UpperCamelCase__ , (list, tuple) ):
if len(UpperCamelCase__ ) == 0 or isinstance(html_strings[0] , UpperCamelCase__ ):
UpperCAmelCase_ = True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `List[str]` (batch of examples), "
F"""but is of type {type(UpperCamelCase__ )}.""" )
UpperCAmelCase_ = bool(isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(html_strings[0] , UpperCamelCase__ )) )
if not is_batched:
UpperCAmelCase_ = [html_strings]
# Get nodes + xpaths
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for html_string in html_strings:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.get_three_from_single(UpperCamelCase__ )
nodes.append(UpperCamelCase__ )
UpperCAmelCase_ = []
for node, tag_list, sub_list in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = self.construct_xpath(UpperCamelCase__ , UpperCamelCase__ )
xpath_strings.append(UpperCamelCase__ )
xpaths.append(UpperCamelCase__ )
# return as Dict
UpperCAmelCase_ = {"nodes": nodes, "xpaths": xpaths}
UpperCAmelCase_ = BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
return encoded_inputs
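# Minimal end-to-end sketch (illustrative; `extractor` stands for an instance of the
# class above, which requires bs4):
#
#   extractor = <feature extractor>()
#   out = extractor("<html><body><p>Hello</p></body></html>")
#   out["nodes"]   # [["Hello"]]
#   out["xpaths"]  # [["/html/body/p"]]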
| 660 | '''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__snake_case : Optional[Any] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowercase_ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=1_6 , UpperCamelCase__=1_3 , UpperCamelCase__=7 , UpperCamelCase__=1_4 , UpperCamelCase__=1_0 , UpperCamelCase__=1_9 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=True , UpperCamelCase__=1_6 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=[1, 2, 3, 4, 5] , UpperCamelCase__=2_5 , UpperCamelCase__=5 , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = d_model
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = prediction_length
UpperCAmelCase_ = context_length
UpperCAmelCase_ = cardinality
UpperCAmelCase_ = num_time_features
UpperCAmelCase_ = lags_sequence
UpperCAmelCase_ = embedding_dimension
UpperCAmelCase_ = is_training
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = context_length
UpperCAmelCase_ = prediction_length + label_length
UpperCAmelCase_ = label_length
UpperCAmelCase_ = moving_average
UpperCAmelCase_ = autocorrelation_factor
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = config.context_length + max(config.lags_sequence )
UpperCAmelCase_ = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
UpperCAmelCase_ = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
UpperCAmelCase_ = floats_tensor([self.batch_size, _past_length] )
UpperCAmelCase_ = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
UpperCAmelCase_ = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
UpperCAmelCase_ = floats_tensor([self.batch_size, config.prediction_length] )
UpperCAmelCase_ = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.prepare_autoformer_inputs_dict(UpperCamelCase__ )
return config, inputs_dict
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel(config=UpperCamelCase__ ).to(UpperCamelCase__ ).eval()
UpperCAmelCase_ = model(**UpperCamelCase__ )
UpperCAmelCase_ = outputs.encoder_last_hidden_state
UpperCAmelCase_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = model.get_encoder()
encoder.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = AutoformerEncoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = model.create_network_inputs(**UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
UpperCAmelCase_ = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
UpperCAmelCase_ = encoder(inputs_embeds=UpperCamelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
UpperCAmelCase_ = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
UpperCAmelCase_ = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
UpperCAmelCase_ = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
UpperCAmelCase_ = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = model.get_decoder()
decoder.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ = AutoformerDecoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
UpperCAmelCase_ = decoder(
trend=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowercase_ ( _A , _A , unittest.TestCase ):
a_ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a_ = (AutoformerForPrediction,) if is_torch_available() else ()
a_ = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = model_class.from_pretrained(UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = inspect.signature(getattr(UpperCamelCase__ , "forward" ) )
# The main input is the name of the argument after `self`
UpperCAmelCase_ = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
UpperCAmelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "d_model" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "num_attention_heads" , UpperCamelCase__ )
UpperCAmelCase_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
UpperCAmelCase_ = len(UpperCamelCase__ )
UpperCAmelCase_ = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# decoder attentions
UpperCAmelCase_ = outputs.decoder_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
UpperCAmelCase_ = outputs.cross_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def lowerCamelCase__ ( A_="train-batch.pt" ):
UpperCAmelCase_ = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=A_ , repo_type="dataset" )
UpperCAmelCase_ = torch.load(A_ , map_location=A_ )
return batch
@require_torch
@slow
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch()
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
UpperCAmelCase_ = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
UpperCAmelCase_ = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
UpperCAmelCase_ = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCamelCase__ )
UpperCAmelCase_ = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase__ , rtol=1e-1 ) )
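# Note on the generate test above (illustrative): `generate` returns
# `num_parallel_samples` sampled trajectories per series, shaped
# (batch, num_parallel_samples, prediction_length); the customary point forecast is
# the mean over the sample dimension, which is exactly what the final allclose checks.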
| 660 | 1 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowercase_ ( unittest.TestCase ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = 3_2 , UpperCamelCase__ = True , UpperCamelCase__ = 1 / 2_5_5 , UpperCamelCase__ = True , UpperCamelCase__ = True , UpperCamelCase__ = [0.48145466, 0.4578275, 0.40821073] , UpperCamelCase__ = [0.26862954, 0.26130258, 0.27577711] , UpperCamelCase__ = True , UpperCamelCase__=7 , UpperCamelCase__=3_0 , UpperCamelCase__=4_0_0 , UpperCamelCase__=3 , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 2_8_8}
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__=False ) -> int:
"""simple docstring"""
if not batched:
UpperCAmelCase_ = self.size["shortest_edge"]
UpperCAmelCase_ = image_inputs[0]
if isinstance(UpperCamelCase__ , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
UpperCAmelCase_ = size / min(UpperCamelCase__ , UpperCamelCase__ )
if h < w:
UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
else:
UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size
UpperCAmelCase_ = int((1_3_3_3 / 8_0_0) * size )
if max(UpperCamelCase__ , UpperCamelCase__ ) > max_size:
UpperCAmelCase_ = max_size / max(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase_ = newh * scale
UpperCAmelCase_ = neww * scale
UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5 ), int(neww + 0.5 )
UpperCAmelCase_ , UpperCAmelCase_ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase_ = max(UpperCamelCase__ , key=lambda UpperCamelCase__ : item[0] )[0]
UpperCAmelCase_ = max(UpperCamelCase__ , key=lambda UpperCamelCase__ : item[1] )[1]
return expected_height, expected_width
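# Worked example of the resize arithmetic above (illustrative numbers): for a 600x400
# image with shortest_edge=288 and size_divisor=32, scale = 288/400 = 0.72, giving
# (288, 432); the int(1333/800 * 288) = 479 cap is not hit, and both sides are already
# multiples of 32, so the expected output size is (288, 432).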
@require_torch
@require_vision
class lowercase_ ( _A , unittest.TestCase ):
a_ = BridgeTowerImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = BridgeTowerImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size_divisor" ) )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 660 | '''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
__snake_case : Tuple = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
__snake_case : Dict = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def lowerCamelCase__ ( A_ , A_ ):
with open(A_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase_ = json.loads(f.read() )
UpperCAmelCase_ = collections.OrderedDict()
UpperCAmelCase_ = collections.OrderedDict()
UpperCAmelCase_ = collections.OrderedDict()
with open(A_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(A_ ):
UpperCAmelCase_ = b
UpperCAmelCase_ = idx
for wd in b:
UpperCAmelCase_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
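# Illustrative vocab.txt layout implied by the parser above (hypothetical content):
# each line is one id, and a comma-separated line maps several surface forms to that
# id, e.g.
#
#   line 0:  こんにちは
#   line 1:  おはよう,おはよー
#
# gives vocab["おはよう"] == vocab["おはよー"] == 1, while ids_to_tokens[1] keeps the
# whole list of spellings.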
class lowercase_ ( _A ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|startoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , **UpperCamelCase__ , ) -> int:
"""simple docstring"""
super().__init__(
unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , do_clean_text=UpperCamelCase__ , **UpperCamelCase__ , )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(
F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(
F"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
UpperCAmelCase_ = do_clean_text
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = load_vocab_and_emoji(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return len(self.raw_vocab )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(UpperCamelCase__ , clean=self.do_clean_text )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> int:
"""simple docstring"""
return self.vocab.get(UpperCamelCase__ , self.vocab.get(self.unk_token ) )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = "".join(UpperCamelCase__ ).strip()
return out_string
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
UpperCAmelCase_ = input_ids[-self.model_max_length :]
return input_ids
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase_ = 0
if os.path.isdir(UpperCamelCase__ ):
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
UpperCAmelCase_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
UpperCAmelCase_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
UpperCAmelCase_ = token_index
writer.write(",".join(UpperCamelCase__ ) + "\n" )
index += 1
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , UpperCamelCase__ )
return vocab_file, emoji_file
class lowercase_ ( _A ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = vocab # same as swe
UpperCAmelCase_ = ids_to_tokens # same as bpe
UpperCAmelCase_ = emoji
UpperCAmelCase_ = np.max([len(UpperCamelCase__ ) for w in self.vocab.keys()] )
UpperCAmelCase_ = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
UpperCAmelCase_ = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
UpperCAmelCase_ = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
UpperCAmelCase_ = re.compile(
R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
UpperCAmelCase_ = re.compile(
R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
UpperCAmelCase_ = re.compile(
R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
UpperCAmelCase_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
UpperCAmelCase_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
UpperCAmelCase_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ) -> int:
"""simple docstring"""
return len(self.ids_to_tokens )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = self.content_repattera.sub("<URL>" , UpperCamelCase__ )
UpperCAmelCase_ = self.content_repattera.sub("<EMAIL>" , UpperCamelCase__ )
UpperCAmelCase_ = self.content_repattera.sub("<TEL>" , UpperCamelCase__ )
UpperCAmelCase_ = self.content_repattera.sub("<DATE>" , UpperCamelCase__ )
UpperCAmelCase_ = self.content_repattera.sub("<DATE>" , UpperCamelCase__ )
UpperCAmelCase_ = self.content_repattera.sub("<PRICE>" , UpperCamelCase__ )
UpperCAmelCase_ = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
UpperCAmelCase_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
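# Before/after sketch of the cleaning step above (illustrative input):
#
#   "連絡は foo@bar.com か https://example.com まで"
#   -> "連絡は <EMAIL> か <URL> まで"
#
# URLs, e-mails, phone numbers, dates, and prices are each collapsed to a single
# placeholder token, and runs of box-drawing/block characters shrink to one <BLOCK>.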
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__=False ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = text.replace(" " , "<SP>" )
UpperCAmelCase_ = text.replace(" " , "<SP>" )
UpperCAmelCase_ = text.replace("\r\n" , "<BR>" )
UpperCAmelCase_ = text.replace("\n" , "<BR>" )
UpperCAmelCase_ = text.replace("\r" , "<BR>" )
UpperCAmelCase_ = text.replace("\t" , "<TAB>" )
UpperCAmelCase_ = text.replace("—" , "ー" )
UpperCAmelCase_ = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
UpperCAmelCase_ = text.replace(UpperCamelCase__ , UpperCamelCase__ )
if clean:
UpperCAmelCase_ = self.clean_text(UpperCamelCase__ )
def check_simbol(UpperCamelCase__ ):
UpperCAmelCase_ = x.encode()
if len(UpperCamelCase__ ) == 1 and len(UpperCamelCase__ ) == 2:
UpperCAmelCase_ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc_2_a_1 and c <= 0Xc_2_b_f)
or (c >= 0Xc_7_8_0 and c <= 0Xc_7_8_3)
or (c >= 0Xc_a_b_9 and c <= 0Xc_b_b_f)
or (c >= 0Xc_c_8_0 and c <= 0Xc_d_a_2)
):
return True
return False
def checkuae(UpperCamelCase__ ):
UpperCAmelCase_ = x.encode()
if len(UpperCamelCase__ ) == 1 and len(UpperCamelCase__ ) == 3:
UpperCAmelCase_ = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe_2_8_0_8_0 and c <= 0Xe_2_b_0_7_f:
return True
return False
UpperCAmelCase_ = 0
UpperCAmelCase_ = []
while pos < len(UpperCamelCase__ ):
UpperCAmelCase_ = min(len(UpperCamelCase__ ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
UpperCAmelCase_ = [] # (token_id, token, pos)
for e in range(UpperCamelCase__ , UpperCamelCase__ , -1 ):
UpperCAmelCase_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(UpperCamelCase__ ) > 2:
UpperCAmelCase_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(UpperCamelCase__ ) > 0:
# the smallest token_id is adopted
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[0] )[0]
result.append(UpperCamelCase__ )
UpperCAmelCase_ = e
else:
UpperCAmelCase_ = pos + 1
UpperCAmelCase_ = text[pos:end]
if check_simbol(UpperCamelCase__ ):
result.append("<KIGOU>" )
elif checkuae(UpperCamelCase__ ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
UpperCAmelCase_ = end
return result
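# Tokenization strategy above, in short (illustrative): from each position every
# vocabulary match up to `maxlen` characters is collected, special "<...>" tokens win
# immediately, and among the remaining candidates the one with the smallest token id
# is adopted; anything unmatched falls back to symbol classes (<KIGOU>, <U2000U2BFF>)
# or raw UTF-8 bytes as <|byte%d|> tokens, so tokenization can never fail on unseen
# characters.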
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__="\n" ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(UpperCamelCase__ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = "".join(UpperCamelCase__ )
return text
| 660 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__snake_case : Tuple = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = ['''DPTFeatureExtractor''']
__snake_case : Optional[int] = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__snake_case : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
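# Usage note (illustrative): with the _LazyModule indirection above, importing this
# package stays cheap; the torch- or vision-dependent symbols are only resolved on
# first attribute access, e.g.
#
#   from transformers.models import dpt   # no torch import yet
#   dpt.DPTConfig                         # triggers the real import via __getattr__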
| 660 | '''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__snake_case : Union[str, Any] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
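# Lifecycle implied by the branches below (summary, not in the original script): an
# issue at least 30 days old is first warned after 23 days of inactivity and labeled
# "stale"; if the bot's warning then sits unanswered for another 7 days the issue is
# closed, while any non-bot comment reopens it and removes the label. Issues carrying
# one of the labels above are always exempt.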
def lowerCamelCase__ ( ):
UpperCAmelCase_ = Github(os.environ["GITHUB_TOKEN"] )
UpperCAmelCase_ = g.get_repo("huggingface/diffusers" )
UpperCAmelCase_ = repo.get_issues(state="open" )
for issue in open_issues:
UpperCAmelCase_ = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
UpperCAmelCase_ = comments[0] if len(A_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 660 | 1 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
__snake_case : Optional[int] = logging.getLogger(__name__)
def lowerCamelCase__ ( A_ , A_ ):
# save results
if os.path.exists(A_ ):
if os.path.exists(os.path.join(A_ , "config.json" ) ) and os.path.isfile(
os.path.join(A_ , "config.json" ) ):
os.remove(os.path.join(A_ , "config.json" ) )
if os.path.exists(os.path.join(A_ , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(A_ , "pytorch_model.bin" ) ):
os.remove(os.path.join(A_ , "pytorch_model.bin" ) )
else:
os.makedirs(A_ )
model.save_pretrained(A_ )
def lowerCamelCase__ ( A_ , A_=False ):
UpperCAmelCase_ = 2
if unlogit:
UpperCAmelCase_ = torch.pow(A_ , A_ )
UpperCAmelCase_ = p * torch.log(A_ )
UpperCAmelCase_ = 0
return -plogp.sum(dim=-1 )
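# Sanity check (illustrative, not part of the script): for a uniform attention row the
# function above returns the maximum entropy log(n), e.g.
#
#   entropy(torch.full((1, 4), 0.25))   # tensor([1.3863]) == ln(4)
#
# assuming `entropy` is the (de-obfuscated) name of the function and unlogit=False.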
def lowerCamelCase__ ( A_ ):
logger.info("lv, h >\t" + "\t".join(F"""{x + 1}""" for x in range(len(A_ ) ) ) )
for row in range(len(A_ ) ):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=True , A_=True , A_=None , A_=False ):
UpperCAmelCase_ , UpperCAmelCase_ = model.config.num_hidden_layers, model.config.num_attention_heads
UpperCAmelCase_ = torch.zeros(A_ , A_ ).to(args.device )
UpperCAmelCase_ = torch.zeros(A_ , A_ ).to(args.device )
if head_mask is None:
UpperCAmelCase_ = torch.ones(A_ , A_ ).to(args.device )
head_mask.requires_grad_(requires_grad=A_ )
# If the attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
UpperCAmelCase_ = None
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = 0.0
for step, inputs in enumerate(tqdm(A_ , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
UpperCAmelCase_ = tuple(t.to(args.device ) for t in inputs )
((UpperCAmelCase_) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
UpperCAmelCase_ = model(A_ , labels=A_ , head_mask=A_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(A_ ):
UpperCAmelCase_ = entropy(attn.detach() , A_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(A_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
UpperCAmelCase_ = 2
UpperCAmelCase_ = torch.pow(torch.pow(A_ , A_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
UpperCAmelCase_ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(A_ )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(A_ )
logger.info("Head ranked by importance scores" )
UpperCAmelCase_ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
UpperCAmelCase_ = torch.arange(
head_importance.numel() , device=args.device )
UpperCAmelCase_ = head_ranks.view_as(A_ )
print_ad_tensor(A_ )
return attn_entropy, head_importance, total_loss
def lowerCamelCase__ ( A_ , A_ , A_ ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(A_ , A_ , A_ , compute_entropy=A_ )
UpperCAmelCase_ = 1 / loss # instead of the downstream score, use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , A_ , original_score * args.masking_threshold )
UpperCAmelCase_ = torch.ones_like(A_ )
UpperCAmelCase_ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
UpperCAmelCase_ = original_score
while current_score >= original_score * args.masking_threshold:
UpperCAmelCase_ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
UpperCAmelCase_ = float("Inf" )
UpperCAmelCase_ = head_importance.view(-1 ).sort()[1]
if len(A_ ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
UpperCAmelCase_ = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
UpperCAmelCase_ = new_head_mask.view(-1 )
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = new_head_mask.view_as(A_ )
UpperCAmelCase_ = new_head_mask.clone().detach()
print_ad_tensor(A_ )
# Compute metric and head importance again
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , head_mask=A_ )
UpperCAmelCase_ = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , A_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(A_ )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCamelCase__ ( A_ , A_ , A_ , A_ ):
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(A_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(A_ , A_ ):
UpperCAmelCase_ = [
v,
]
assert sum(len(A_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(A_ )
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ , actually_pruned=A_ , )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , A_ , A_ , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , A_ , A_ )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(A_ , args.output_dir )
def lowerCamelCase__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=A_ , type=A_ , required=A_ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=A_ , type=A_ , required=A_ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=A_ , type=A_ , required=A_ , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=A_ , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=A_ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=A_ , type=A_ , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=A_ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=A_ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=A_ , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=A_ , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=A_ , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=A_ , help="Batch size." )
parser.add_argument("--seed" , type=A_ , default=42 )
parser.add_argument("--local_rank" , type=A_ , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=A_ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=A_ , default="" , help="Can be used for distant debugging." )
UpperCAmelCase_ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
UpperCAmelCase_ = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
UpperCAmelCase_ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
UpperCAmelCase_ = torch.device("cuda" , args.local_rank )
UpperCAmelCase_ = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
UpperCAmelCase_ = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
UpperCAmelCase_ = nn.parallel.DistributedDataParallel(
A_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=A_ )
elif args.n_gpu > 1:
UpperCAmelCase_ = nn.DataParallel(A_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=A_ )
torch.save(A_ , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , A_ )
# Prepare dataset
UpperCAmelCase_ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.int64 ),
] )
UpperCAmelCase_ = (torch.from_numpy(A_ ),)
UpperCAmelCase_ = TensorDataset(*A_ )
UpperCAmelCase_ = RandomSampler(A_ )
UpperCAmelCase_ = DataLoader(A_ , sampler=A_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(A_ , A_ , A_ )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCAmelCase_ = mask_heads(A_ , A_ , A_ )
prune_heads(A_ , A_ , A_ , A_ )
if __name__ == "__main__":
main()
| 660 | '''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__snake_case : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowercase_ ( datasets.BuilderConfig ):
a_ = 1_0000
a_ = None
a_ = None
class lowercase_ ( datasets.ArrowBasedBuilder ):
a_ = ParquetConfig
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCAmelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase__ , (str, list, tuple) ):
UpperCAmelCase_ = data_files
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase_ = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(UpperCamelCase__ ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = datasets.Features.from_arrow_schema(pq.read_schema(UpperCamelCase__ ) )
break
splits.append(datasets.SplitGenerator(name=UpperCamelCase__ , gen_kwargs={"files": files} ) )
return splits
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ = table_cast(UpperCamelCase__ , self.info.features.arrow_schema )
return pa_table
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase__ ) ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = pq.ParquetFile(UpperCamelCase__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
UpperCAmelCase_ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(UpperCamelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCamelCase__ )}: {e}""" )
raise
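# Standalone sketch of the same batched-read pattern (illustrative, plain pyarrow):
#
#   import pyarrow as pa
#   import pyarrow.parquet as pq
#
#   pf = pq.ParquetFile("data.parquet")
#   for i, rb in enumerate(pf.iter_batches(batch_size=10_000, columns=["id", "text"])):
#       table = pa.Table.from_batches([rb])   # one Arrow table per yielded record batch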
| 660 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__snake_case : Any = ['''small''', '''medium''', '''large''']
__snake_case : Dict = '''lm_head.decoder.weight'''
__snake_case : Any = '''lm_head.weight'''
def lowerCamelCase__ ( A_ , A_ ):
UpperCAmelCase_ = torch.load(A_ )
UpperCAmelCase_ = d.pop(A_ )
os.makedirs(A_ , exist_ok=A_ )
torch.save(A_ , os.path.join(A_ , A_ ) )
if __name__ == "__main__":
__snake_case : int = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
__snake_case : str = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__snake_case : Tuple = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
__snake_case : List[Any] = F'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
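# Minimal sketch of the conversion step above (dummy tensor, hypothetical
# output path): pop the old lm_head key and re-save it under the new name.
import torch

state_dict = {"lm_head.decoder.weight": torch.zeros(2, 2)}
state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
torch.save(state_dict, "pytorch_model.bin")
print(sorted(state_dict))  # ['lm_head.weight']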
| 660 | '''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''spiece.model'''}
__snake_case : Dict = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
__snake_case : Tuple = {
'''AI-Sweden/gpt-sw3-126m''': 20_48,
'''AI-Sweden/gpt-sw3-350m''': 20_48,
'''AI-Sweden/gpt-sw3-1.6b''': 20_48,
'''AI-Sweden/gpt-sw3-6.7b''': 20_48,
'''AI-Sweden/gpt-sw3-20b''': 20_48,
}
class lowercase_ ( _A ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
"""simple docstring"""
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase_ = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
UpperCAmelCase_ = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
UpperCAmelCase_ = "<|endoftext|>" if eos_token is None else eos_token
UpperCAmelCase_ = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
UpperCAmelCase_ = unk_token if pad_token is None else pad_token
UpperCAmelCase_ = eos_token if bos_token is None else bos_token
else:
UpperCAmelCase_ = "<pad>" if pad_token is None else pad_token
UpperCAmelCase_ = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
# Used for whitespace normalization in input texts
# fmt: off
UpperCAmelCase_ = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
UpperCAmelCase_ = re.compile(
F"""[{"".join(map(UpperCamelCase__ , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" )
def __getstate__( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return len(self.sp_model )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.non_printing_characters_re.sub("" , UpperCamelCase__ )
# Normalize whitespaces
UpperCAmelCase_ = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
UpperCAmelCase_ = unicodedata.normalize("NFC" , UpperCamelCase__ )
return text
def lowerCamelCase_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.preprocess_text(UpperCamelCase__ )
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCamelCase__ )
@staticmethod
def lowerCamelCase_ ( UpperCamelCase__ ) -> str:
"""simple docstring"""
return out_string
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = ""
UpperCAmelCase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
UpperCAmelCase_ = True
UpperCAmelCase_ = []
else:
current_sub_tokens.append(UpperCamelCase__ )
UpperCAmelCase_ = False
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string
def lowerCamelCase_ ( self ) -> Dict[str, int]:
"""simple docstring"""
UpperCAmelCase_ = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
"""simple docstring"""
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = self.preprocess_text(UpperCamelCase__ )
UpperCAmelCase_ = self.sp_model.encode(UpperCamelCase__ )
else:
UpperCAmelCase_ = [self.preprocess_text(UpperCamelCase__ ) for t in text]
UpperCAmelCase_ = self.sp_model.encode(UpperCamelCase__ )
if return_tensors is True or return_tensors == "pt":
UpperCAmelCase_ = torch.tensor(UpperCamelCase__ )
return token_ids
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
return self.sp_model.decode(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
UpperCAmelCase_ = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(UpperCamelCase__ ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=UpperCamelCase__ )
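# Standalone sketch of the preprocess_text step above: strip non-printing
# control characters, map exotic Unicode spaces to plain spaces, then apply
# NFC normalization (character ranges mirror the regex built in __init__).
import re
import unicodedata

non_printing = re.compile(
    "[%s]" % "".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))
)
whitespaces = {"\u2009", "\u200a", "\u202f", "\u3000"}

def preprocess(text):
    text = non_printing.sub("", text)
    text = "".join(char if char not in whitespaces else " " for char in text)
    return unicodedata.normalize("NFC", text)

print(preprocess("Hello\u2009world"))  # -> "Hello world"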
| 660 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Any = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class lowercase_ ( _A ):
a_ = """git_vision_model"""
def __init__( self , UpperCamelCase__=7_6_8 , UpperCamelCase__=3_0_7_2 , UpperCamelCase__=1_2 , UpperCamelCase__=1_2 , UpperCamelCase__=3 , UpperCamelCase__=2_2_4 , UpperCamelCase__=1_6 , UpperCamelCase__="quick_gelu" , UpperCamelCase__=1e-5 , UpperCamelCase__=0.0 , UpperCamelCase__=0.02 , **UpperCamelCase__ , ) -> Tuple:
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = hidden_act
@classmethod
def lowerCamelCase_ ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("model_type" ) == "git":
UpperCAmelCase_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class lowercase_ ( _A ):
a_ = """git"""
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=3_0_5_2_2 , UpperCamelCase__=7_6_8 , UpperCamelCase__=6 , UpperCamelCase__=1_2 , UpperCamelCase__=3_0_7_2 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1_0_2_4 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=0 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=1_0_1 , UpperCamelCase__=1_0_2 , UpperCamelCase__=None , **UpperCamelCase__ , ) -> List[Any]:
"""simple docstring"""
super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
if vision_config is None:
UpperCAmelCase_ = {}
logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
UpperCAmelCase_ = GitVisionConfig(**UpperCamelCase__ )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = tie_word_embeddings
UpperCAmelCase_ = num_image_with_embedding
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = eos_token_id
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ = self.vision_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
| 660 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowercase_ ( unittest.TestCase ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=7 , UpperCamelCase__=3 , UpperCamelCase__=1_8 , UpperCamelCase__=3_0 , UpperCamelCase__=4_0_0 , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=[0.5, 0.5, 0.5] , UpperCamelCase__=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 1_8}
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
def lowerCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase_ ( _A , unittest.TestCase ):
a_ = LevitImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = LevitImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
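# Hedged sketch (PIL + NumPy) of the pipeline the tests above exercise: resize
# the shortest edge to 18, center-crop to 18x18, normalize with mean/std 0.5,
# and emit a (C, H, W) array like the processor's pixel_values.
import numpy as np
from PIL import Image

image = Image.new("RGB", (30, 40), color=(128, 64, 0))
shortest_edge = crop = 18
scale = shortest_edge / min(image.size)
image = image.resize((round(image.width * scale), round(image.height * scale)))
left, top = (image.width - crop) // 2, (image.height - crop) // 2
image = image.crop((left, top, left + crop, top + crop))
pixels = (np.asarray(image) / 255.0 - 0.5) / 0.5  # normalize with mean=std=0.5
print(pixels.transpose(2, 0, 1).shape)  # (3, 18, 18)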
| 660 | 1 |
import sys
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def __lowercase ( snake_case = SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__magic_name__ :Optional[int] = -sys.maxsize - 1
for i in range(len(snake_case ) - 1_2 ):
__magic_name__ :List[Any] = 1
for j in range(1_3 ):
product *= int(snake_case[i + j] )
if product > largest_product:
__magic_name__ :str = product
return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
| 0 | '''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 100 , ):
UpperCAmelCase_ = x_start
UpperCAmelCase_ = fnc(A_ )
UpperCAmelCase_ = 0.0
for _ in range(A_ ):
# Approximates curve as a sequence of linear lines and sums their length
UpperCAmelCase_ = (x_end - x_start) / steps + xa
UpperCAmelCase_ = fnc(A_ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
UpperCAmelCase_ = xa
UpperCAmelCase_ = fxa
return length
if __name__ == "__main__":
def lowerCamelCase__ ( A_ ):
return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
__snake_case : List[Any] = 10
while i <= 10_00_00:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
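# Sanity check of the same piecewise-linear approximation on a curve with a
# known exact length: y = 2x on [0, 3] has length 3 * sqrt(5).
import math

def approx_length(fnc, x_start, x_end, steps=100):
    xa, fxa, length = x_start, fnc(x_start), 0.0
    for _ in range(steps):
        xb = xa + (x_end - x_start) / steps
        fxb = fnc(xb)
        length += math.hypot(xb - xa, fxb - fxa)
        xa, fxa = xb, fxb
    return length

print(approx_length(lambda x: 2 * x, 0, 3), 3 * math.sqrt(5))  # both ~6.708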
| 660 | 0 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _A ( _lowercase , _lowercase , _lowercase ) -> str:
"""simple docstring"""
if openai_config_file == "":
__UpperCamelCase = OpenAIGPTConfig()
else:
__UpperCamelCase = OpenAIGPTConfig.from_json_file(_lowercase )
__UpperCamelCase = OpenAIGPTModel(_lowercase )
# Load weights from numpy
load_tf_weights_in_openai_gpt(_lowercase , _lowercase , _lowercase )
# Save pytorch-model
__UpperCamelCase = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
__UpperCamelCase = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , _lowercase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_lowercase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
__snake_case = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 1 | '''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowercase_ ( _A ):
a_ = """"""
a_ = """hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Tuple:
"""simple docstring"""
super().__init__(self , **UpperCamelCase__ )
UpperCAmelCase_ = repo_info
UpperCAmelCase_ = token
UpperCAmelCase_ = None
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
if self.dir_cache is None:
UpperCAmelCase_ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase_ = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(UpperCamelCase__ ): {"name": str(UpperCamelCase__ ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = "rb" , **UpperCamelCase__ , ) -> Optional[int]:
"""simple docstring"""
if not isinstance(self.repo_info , UpperCamelCase__ ):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
UpperCAmelCase_ = hf_hub_url(self.repo_info.id , UpperCamelCase__ , revision=self.repo_info.sha )
return fsspec.open(
UpperCamelCase__ , mode=UpperCamelCase__ , headers=get_authentication_headers_for_url(UpperCamelCase__ , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCamelCase_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
self._get_dirs()
UpperCAmelCase_ = self._strip_protocol(UpperCamelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__=False , **UpperCamelCase__ ) -> str:
"""simple docstring"""
self._get_dirs()
UpperCAmelCase_ = PurePosixPath(path.strip("/" ) )
UpperCAmelCase_ = {}
for p, f in self.dir_cache.items():
UpperCAmelCase_ = PurePosixPath(p.strip("/" ) )
UpperCAmelCase_ = p.parent
if root == path:
UpperCAmelCase_ = f
UpperCAmelCase_ = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 | 0 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] ) -> Optional[Any]:
_A = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(_snake_case , _snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Optional[int]:
_A , _A = emb.weight.shape
_A = nn.Linear(_snake_case , _snake_case , bias=_snake_case )
_A = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE_ ( _snake_case :str , _snake_case :Tuple="facebook/mbart-large-en-ro" , _snake_case :Tuple=False , _snake_case :Optional[Any]=False ) -> Optional[int]:
_A = torch.load(_snake_case , map_location='''cpu''' )['''model''']
remove_ignore_keys_(_snake_case )
_A = state_dict['''encoder.embed_tokens.weight'''].shape[0]
_A = MBartConfig.from_pretrained(_snake_case , vocab_size=_snake_case )
if mbart_aa and finetuned:
_A = '''relu'''
_A = state_dict['''decoder.embed_tokens.weight''']
_A = MBartForConditionalGeneration(_snake_case )
model.model.load_state_dict(_snake_case )
if finetuned:
_A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
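# Sketch of make_linear_from_emb above: a bias-free Linear whose weight matrix
# is shared with an embedding, so output logits reuse the input embeddings.
import torch
from torch import nn

emb = nn.Embedding(10, 4)
vocab_size, emb_size = emb.weight.shape
lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
lin_layer.weight.data = emb.weight.data
print(torch.equal(lin_layer.weight, emb.weight))  # True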
| 2 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Union[str, Any] = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 660 | 0 |
'''simple docstring'''
def A_( A : int , A : int):
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive')
UpperCamelCase = str(bin(A))[2:] # remove the leading "0b"
UpperCamelCase = str(bin(A))[2:] # remove the leading "0b"
UpperCamelCase = max(len(A) , len(A))
return "0b" + "".join(
str(int(char_a != char_b))
for char_a, char_b in zip(a_binary.zfill(A) , b_binary.zfill(A)))
if __name__ == "__main__":
import doctest
doctest.testmod()
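# Worked example of the string-XOR routine above: 25 ^ 32 = 0b111001, and the
# zfill alignment reproduces it digit by digit.
a_binary, b_binary = bin(25)[2:], bin(32)[2:]
width = max(len(a_binary), len(b_binary))
result = "0b" + "".join(
    str(int(char_a != char_b)) for char_a, char_b in zip(a_binary.zfill(width), b_binary.zfill(width))
)
print(result, bin(25 ^ 32))  # 0b111001 0b111001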
| 3 | '''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
class lowercase_ ( _A ):
a_ = """linear"""
a_ = """cosine"""
a_ = """cosine_with_restarts"""
a_ = """polynomial"""
a_ = """constant"""
a_ = """constant_with_warmup"""
a_ = """piecewise_constant"""
def lowerCamelCase__ ( A_ , A_ = -1 ):
return LambdaLR(A_ , lambda A_ : 1 , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1.0 , A_ ) )
return 1.0
return LambdaLR(A_ , A_ , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ = -1 ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = step_rules.split("," )
for rule_str in rule_list[:-1]:
UpperCAmelCase_ , UpperCAmelCase_ = rule_str.split(":" )
UpperCAmelCase_ = int(A_ )
UpperCAmelCase_ = float(A_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = float(rule_list[-1] )
def create_rules_function(A_ , A_ ):
def rule_func(A_ ) -> float:
UpperCAmelCase_ = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(A_ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
UpperCAmelCase_ = create_rules_function(A_ , A_ )
return LambdaLR(A_ , A_ , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=-1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 0.5 , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(A_ ) * 2.0 * progress )) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 1 , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(A_ ) * progress) % 1.0) )) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=1e-7 , A_=1.0 , A_=-1 ):
UpperCAmelCase_ = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
UpperCAmelCase_ = lr_init - lr_end
UpperCAmelCase_ = num_training_steps - num_warmup_steps
UpperCAmelCase_ = 1 - (current_step - num_warmup_steps) / decay_steps
UpperCAmelCase_ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(A_ , A_ , A_ )
__snake_case : str = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( A_ , A_ , A_ = None , A_ = None , A_ = None , A_ = 1 , A_ = 1.0 , A_ = -1 , ):
UpperCAmelCase_ = SchedulerType(A_ )
UpperCAmelCase_ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(A_ , last_epoch=A_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(A_ , step_rules=A_ , last_epoch=A_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(A_ , num_warmup_steps=A_ , last_epoch=A_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , num_cycles=A_ , last_epoch=A_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , power=A_ , last_epoch=A_ , )
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , last_epoch=A_ )
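# The linear warmup/decay multiplier used by the linear schedule above,
# evaluated standalone (no optimizer needed) so the schedule shape is visible.
def lr_lambda(step, num_warmup_steps=10, num_training_steps=100):
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

print([round(lr_lambda(s), 2) for s in (0, 5, 10, 55, 100)])  # [0.0, 0.5, 1.0, 0.5, 0.0]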
| 660 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class a ( a__ ):
snake_case__ = '''bert'''
def __init__( self , _snake_case=3_05_22 , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , **_snake_case )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = use_cache
lowerCAmelCase = classifier_dropout
class a ( a__ ):
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 4 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case : Optional[int] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__snake_case : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 660 | 0 |
'''simple docstring'''
from math import pi, sqrt
def A (__lowerCamelCase :float ):
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
elif num - int(__lowerCamelCase ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def A ():
assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase = 1.0
while num:
_lowercase = float(input("""Gamma of: """))
print(F"""gamma({num}) = {gamma(num)}""")
print("""\nEnter 0 to exit...""")
| 5 | '''simple docstring'''
import csv
import tweepy
# Twitter API credentials
__snake_case : Union[str, Any] = ''''''
__snake_case : List[Any] = ''''''
__snake_case : List[str] = ''''''
__snake_case : Any = ''''''
def lowerCamelCase__ ( A_ ):
# authorize twitter, initialize tweepy
UpperCAmelCase_ = tweepy.OAuthHandler(A_ , A_ )
auth.set_access_token(A_ , A_ )
UpperCAmelCase_ = tweepy.API(A_ )
# initialize a list to hold all the tweepy Tweets
UpperCAmelCase_ = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCAmelCase_ = api.user_timeline(screen_name=A_ , count=200 )
# save most recent tweets
alltweets.extend(A_ )
# save the id of the oldest tweet less one
UpperCAmelCase_ = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(A_ ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
UpperCAmelCase_ = api.user_timeline(
screen_name=A_ , count=200 , max_id=A_ )
# save most recent tweets
alltweets.extend(A_ )
# update the id of the oldest tweet less one
UpperCAmelCase_ = alltweets[-1].id - 1
print(F"""...{len(A_ )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCAmelCase_ = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , "w" ) as f:
UpperCAmelCase_ = csv.writer(A_ )
writer.writerow(["id", "created_at", "text"] )
writer.writerows(A_ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
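# Generic sketch of the max_id pagination loop above, with a fake paged source
# standing in for the Twitter API (no credentials required).
def fetch(max_id=None, count=3):
    ids = [i for i in range(10, 0, -1) if max_id is None or i <= max_id]
    return ids[:count]

all_ids = fetch()
while True:
    page = fetch(max_id=all_ids[-1] - 1)  # request strictly older items
    if not page:
        break
    all_ids.extend(page)
print(all_ids)  # [10, 9, 8, ..., 1]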
| 660 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "segformer"
def __init__( self :Optional[int] , __A :Optional[Any]=3 , __A :Optional[Any]=4 , __A :int=[2, 2, 2, 2] , __A :int=[8, 4, 2, 1] , __A :str=[32, 64, 160, 256] , __A :Optional[int]=[7, 3, 3, 3] , __A :Optional[int]=[4, 2, 2, 2] , __A :int=[1, 2, 5, 8] , __A :List[str]=[4, 4, 4, 4] , __A :Union[str, Any]="gelu" , __A :Tuple=0.0 , __A :int=0.0 , __A :Optional[int]=0.1 , __A :Any=0.0_2 , __A :Union[str, Any]=0.1 , __A :Dict=1E-6 , __A :Optional[int]=256 , __A :Dict=255 , **__A :Any , ) -> Dict:
"""simple docstring"""
super().__init__(**__A )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __A , )
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = num_encoder_blocks
SCREAMING_SNAKE_CASE__ = depths
SCREAMING_SNAKE_CASE__ = sr_ratios
SCREAMING_SNAKE_CASE__ = hidden_sizes
SCREAMING_SNAKE_CASE__ = patch_sizes
SCREAMING_SNAKE_CASE__ = strides
SCREAMING_SNAKE_CASE__ = mlp_ratios
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = classifier_dropout_prob
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = drop_path_rate
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = decoder_hidden_size
SCREAMING_SNAKE_CASE__ = kwargs.get("""reshape_last_stage""" , __A )
SCREAMING_SNAKE_CASE__ = semantic_loss_ignore_index
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = version.parse("1.11" )
@property
def _snake_case ( self :int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self :Union[str, Any] ) -> float:
"""simple docstring"""
return 1E-4
@property
def _snake_case ( self :List[str] ) -> int:
"""simple docstring"""
return 12 | 6 | '''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__snake_case : int = logging.get_logger(__name__)
class lowercase_ ( _A ):
def __init__( self , **UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
requires_backends(self , ["bs4"] )
super().__init__(**UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCAmelCase_ = parent.find_all(child.name , recursive=UpperCamelCase__ )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(UpperCamelCase__ ) else next(i for i, s in enumerate(UpperCamelCase__ , 1 ) if s is child ) )
UpperCAmelCase_ = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = BeautifulSoup(UpperCamelCase__ , "html.parser" )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for element in html_code.descendants:
if type(element ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
UpperCAmelCase_ = html.unescape(element ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = self.xpath_soup(UpperCamelCase__ )
stringaxtag_seq.append(UpperCamelCase__ )
stringaxsubs_seq.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("Number of doc strings and xtags does not correspond" )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("Number of doc strings and xsubs does not correspond" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = ""
for tagname, subs in zip(UpperCamelCase__ , UpperCamelCase__ ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self , UpperCamelCase__ ) -> BatchFeature:
"""simple docstring"""
UpperCAmelCase_ = False
# Check that strings has a valid type
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = True
elif isinstance(UpperCamelCase__ , (list, tuple) ):
if len(UpperCamelCase__ ) == 0 or isinstance(html_strings[0] , UpperCamelCase__ ):
UpperCAmelCase_ = True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `List[str]` (batch of examples), "
F"""but is of type {type(UpperCamelCase__ )}.""" )
UpperCAmelCase_ = bool(isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(html_strings[0] , UpperCamelCase__ )) )
if not is_batched:
UpperCAmelCase_ = [html_strings]
# Get nodes + xpaths
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for html_string in html_strings:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.get_three_from_single(UpperCamelCase__ )
nodes.append(UpperCamelCase__ )
UpperCAmelCase_ = []
for node, tag_list, sub_list in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = self.construct_xpath(UpperCamelCase__ , UpperCamelCase__ )
xpath_strings.append(UpperCamelCase__ )
xpaths.append(UpperCamelCase__ )
# return as Dict
UpperCAmelCase_ = {"nodes": nodes, "xpaths": xpaths}
UpperCAmelCase_ = BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
return encoded_inputs
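# Hedged sketch of the xpath reconstruction above on a tiny document (requires
# beautifulsoup4); the sibling position is found by identity, as in the extractor.
from bs4 import BeautifulSoup

soup = BeautifulSoup("<html><body><p>a</p><p>b</p></body></html>", "html.parser")
child = soup.find_all("p")[1]
xpath_tags, xpath_subs = [], []
for parent in child.parents:
    siblings = parent.find_all(child.name, recursive=False)
    xpath_tags.append(child.name)
    xpath_subs.append(0 if len(siblings) == 1 else next(i for i, s in enumerate(siblings, 1) if s is child))
    child = parent
xpath_tags.reverse()
xpath_subs.reverse()
xpath = "".join(f"/{t}" + (f"[{s}]" if s else "") for t, s in zip(xpath_tags, xpath_subs))
print(xpath)  # /html/body/p[2]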
| 660 | 0 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger()
@dataclass
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : nn.Module
UpperCAmelCase : List[nn.Module] = field(default_factory=__lowerCAmelCase )
UpperCAmelCase : list = field(default_factory=__lowerCAmelCase )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Tensor , _UpperCAmelCase : Tensor ):
_A = len(list(m.modules() ) ) == 1 or isinstance(_UpperCAmelCase , nn.Convad ) or isinstance(_UpperCAmelCase , nn.BatchNormad )
if has_not_submodules:
self.traced.append(_UpperCAmelCase )
def __call__( self : List[str] , _UpperCAmelCase : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_UpperCAmelCase )
[x.remove() for x in self.handles]
return self
@property
def lowerCAmelCase_ ( self : Tuple ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda _UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : nn.Module
UpperCAmelCase : nn.Module
UpperCAmelCase : int = 0
UpperCAmelCase : List = field(default_factory=__lowerCAmelCase )
UpperCAmelCase : List = field(default_factory=__lowerCAmelCase )
def __call__( self : Dict , _UpperCAmelCase : Tensor ):
_A = Tracker(self.dest )(_UpperCAmelCase ).parametrized
_A = Tracker(self.src )(_UpperCAmelCase ).parametrized
_A = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.src_skip , _UpperCAmelCase ) )
_A = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.dest_skip , _UpperCAmelCase ) )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise Exception(
F'''Numbers of operations are different. Source module has {len(_UpperCAmelCase )} operations while'''
F''' destination module has {len(_UpperCAmelCase )}.''' )
for dest_m, src_m in zip(_UpperCAmelCase , _UpperCAmelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transferred from={src_m} to={dest_m}''' )
def _snake_case ( _snake_case : str , _snake_case : ResNetConfig , _snake_case : Path , _snake_case : bool = True ) -> Union[str, Any]:
'''simple docstring'''
print(F'''Converting {name}...''' )
with torch.no_grad():
_A = timm.create_model(_snake_case , pretrained=_snake_case ).eval()
_A = ResNetForImageClassification(_snake_case ).eval()
_A = ModuleTransfer(src=_snake_case , dest=_snake_case )
_A = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(_snake_case )
assert torch.allclose(from_model(_snake_case ) , our_model(_snake_case ).logits ), "The model logits don't match the original one."
_A = F'''resnet{"-".join(name.split("resnet" ) )}'''
print(_snake_case )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=_snake_case , )
# we can use the convnext one
_A = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=_snake_case , )
print(F'''Pushed {checkpoint_name}''' )
def _snake_case ( _snake_case : Path , _snake_case : str = None , _snake_case : bool = True ) -> Tuple:
'''simple docstring'''
_A = 'imagenet-1k-id2label.json'
_A = 10_00
_A = (1, num_labels)
_A = 'huggingface/label-files'
_A = num_labels
_A = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='dataset' ) , 'r' ) )
_A = {int(_snake_case ): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
_A = partial(_snake_case , num_labels=_snake_case , idalabel=_snake_case , labelaid=_snake_case )
_A = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(_snake_case , names_to_config[model_name] , _snake_case , _snake_case )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(_snake_case , _snake_case , _snake_case , _snake_case )
return config, expected_shape
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
a = parser.parse_args()
a = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
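# Minimal sketch of the Tracker forward hooks above: record every leaf module
# that fires during a forward pass of a toy network.
import torch
from torch import nn

traced = []
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Flatten(), nn.Linear(288, 10))

def hook(module, inputs, output):
    if len(list(module.modules())) == 1:  # leaf modules only, as in Tracker
        traced.append(type(module).__name__)

handles = [m.register_forward_hook(hook) for m in model.modules()]
model(torch.randn(1, 3, 8, 8))  # 8x8 input -> conv -> 8x6x6 -> flatten -> 288
for h in handles:
    h.remove()
print(traced)  # ['Conv2d', 'ReLU', 'Flatten', 'Linear']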
| 7 | '''simple docstring'''
def lowerCamelCase__ ( A_ , A_ ):
_validate_point(A_ )
_validate_point(A_ )
if len(A_ ) != len(A_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(A_ , A_ ) ) )
def lowerCamelCase__ ( A_ ):
if point:
if isinstance(A_ , A_ ):
for item in point:
if not isinstance(item , (int, float) ):
UpperCAmelCase_ = (
"Expected a list of numbers as input, found "
F"""{type(A_ ).__name__}"""
)
raise TypeError(A_ )
else:
UpperCAmelCase_ = F"""Expected a list of numbers as input, found {type(A_ ).__name__}"""
raise TypeError(A_ )
else:
raise ValueError("Missing an input" )
def lowerCamelCase__ ( A_ , A_ ):
_validate_point(A_ )
_validate_point(A_ )
if len(A_ ) != len(A_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(A_ , A_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
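# Added example: both implementations should agree.
# >>> manhattan_distance([1, 1], [9, 9])
# 16.0
# >>> manhattan_distance_one_liner([1, 1], [2, 2])
# 2.0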
| 660 | 0 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    url = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div', class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""") | 8 | '''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
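# Added sanity check: a uniform distribution over 4 outcomes has entropy log(4) ~ 1.3863,
# e.g. entropy(torch.full((4,), 0.25)) -> tensor(1.3863).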
def print_2d_tensor(tensor):
    logger.info("lv, h >\t" + "\t".join(F"""{x + 1}""" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:.5f}""" for x in tensor[row].cpu().data))
        else:
            logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:d}""" for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device)
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
return attn_entropy, head_importance, total_loss
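# Added note: attn_entropy and head_importance are (num_layers, num_heads) tensors;
# head_importance accumulates the absolute gradient of the head mask, normalized by the
# number of tokens seen.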
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)", current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )
    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
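# Added note: each iteration masks num_to_mask of the remaining heads (least important
# first) and stops once 1/loss falls below args.masking_threshold of the original score,
# returning the last mask that still met the threshold.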
def prune_heads(args, model, eval_dataloader, head_mask):
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3", )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory")
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers")
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1", )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy.")
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).", )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step.")
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ), )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
# Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
# Prepare dataset
    train_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ] )
    train_tensor_dataset = (torch.from_numpy(train_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 660 | 0 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        """simple docstring"""
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
        tokenizer.save_pretrained(self.tmpdirname)
    def get_rust_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        """simple docstring"""
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        TARGET_TOKENS = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)['input_ids']
        self.assertListEqual(TARGET_TOKENS, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding')
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length', )
    def test_encodings_from_xnli_dataset(self):
        """simple docstring"""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset('xnli', 'all_languages', split='test', streaming=True)
        sample_data = next(iter(ds))['premise']  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        """simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
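# Minimal usage sketch outside the test harness (added; assumes Hub access):
# from transformers import BloomTokenizerFast
# tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
# print(tok("The quick brown fox")["input_ids"])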
| 9 | '''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def lowerCamelCase__ ( ):
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).")
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
UpperCAmelCase_ = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase_ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase_ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase_ = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()
    logger.info("Start encoding")
    logger.info(F"""{len(data)} examples to process.""")
    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = F"""{bos} {text.strip()} {sep}"""
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""")
            start = time.time()
logger.info("Finished binarization" )
logger.info(F"""{len(A_ )} examples processed.""" )
UpperCAmelCase_ = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
UpperCAmelCase_ = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCAmelCase_ = [np.uintaa(A_ ) for d in rslt]
else:
UpperCAmelCase_ = [np.intaa(A_ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(A_ , "wb" ) as handle:
pickle.dump(rslt_ , A_ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
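# Added example: reading the binarized dump back, mirroring the pickle.dump above.
# with open(dp_file, "rb") as handle:
#     token_ids = pickle.load(handle)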
| 660 | 0 |
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through out unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4) -> int | None:
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
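# Added worked examples (classic Project Euler #47 values):
# solution(2) -> 14 (14 = 2 * 7 and 15 = 3 * 5 each have two distinct prime factors)
# solution(3) -> 644 (644, 645 and 646 each have three distinct prime factors)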
| 10 | '''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
    bleu_data = json.load(f)
@require_torch
class lowercase_ ( unittest.TestCase ):
    def get_tokenizer(self, mname):
        """simple docstring"""
        return FSMTTokenizer.from_pretrained(mname)
    def get_model(self, mname):
        """simple docstring"""
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        """simple docstring"""
        mname = F"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 660 | 0 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
lowercase_ = "src/transformers"
# Matches is_xxx_available()
lowercase_ = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
lowercase_ = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase_ = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
lowercase_ = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
lowercase_ = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase_ = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase_ = re.compile(R"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase_ = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
lowercase_ = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
lowercase_ = re.compile(R"^\s*try:")
# Catches a line with else:
lowercase_ = re.compile(R"^\s*else:")
def find_backend(line):
    """simple docstring"""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """simple docstring"""
    with open(init_file, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('''_import_structure = {'''):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r'''\[([^\]]+)\]''', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(''' ''' * 8 + '''"'''):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING'''):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(''' ''' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(''', ''')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(''', ''')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(''' ''' * 8 + '''"'''):
                    objects.append(line[9:-3])
                elif line.startswith(''' ''' * 12 + '''"'''):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('''else''')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', '''))
        elif line.startswith(''' ''' * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(''' ''' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', '''))
                elif line.startswith(''' ''' * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
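# Added note: both returned dicts map a backend name ("none" for unconditional imports)
# to the list of object names, so analyze_results() can diff the two halves key by key.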
def analyze_results(import_dict_objects, type_hint_objects):
    """simple docstring"""
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = '''base imports''' if key == '''none''' else F'''{key} backend'''
            errors.append(F'''Differences for {name}:''')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''')
    return errors
def check_all_inits():
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, '''__init__.py''')
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('''\n'''.join(errors))
    if len(failures) > 0:
        raise ValueError('''\n\n'''.join(failures))
def get_transformers_submodules():
    """simple docstring"""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_'''):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob('''*.py'''))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, '''.''')
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace('''.py''', '''''').replace(os.path.sep, '''.''')
            if len(submodule.split('''.''')) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def check_submodules():
    """simple docstring"""
    from transformers.utils import direct_transformers_import
    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''), '''r''') as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''', init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '''\n'''.join(F'''- {module}''' for module in module_not_registered)
        raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
            F'''{list_of_modules}\n'''
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''')
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 11 | '''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[F"""{new_prefix}.norm1.weight"""] = checkpoint[F"""{old_prefix}.in_layers.0.weight"""]
    new_checkpoint[F"""{new_prefix}.norm1.bias"""] = checkpoint[F"""{old_prefix}.in_layers.0.bias"""]
    new_checkpoint[F"""{new_prefix}.conv1.weight"""] = checkpoint[F"""{old_prefix}.in_layers.2.weight"""]
    new_checkpoint[F"""{new_prefix}.conv1.bias"""] = checkpoint[F"""{old_prefix}.in_layers.2.bias"""]
    new_checkpoint[F"""{new_prefix}.time_emb_proj.weight"""] = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""]
    new_checkpoint[F"""{new_prefix}.time_emb_proj.bias"""] = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""]
    new_checkpoint[F"""{new_prefix}.norm2.weight"""] = checkpoint[F"""{old_prefix}.out_layers.0.weight"""]
    new_checkpoint[F"""{new_prefix}.norm2.bias"""] = checkpoint[F"""{old_prefix}.out_layers.0.bias"""]
    new_checkpoint[F"""{new_prefix}.conv2.weight"""] = checkpoint[F"""{old_prefix}.out_layers.3.weight"""]
    new_checkpoint[F"""{new_prefix}.conv2.bias"""] = checkpoint[F"""{old_prefix}.out_layers.3.bias"""]
    if has_skip:
        new_checkpoint[F"""{new_prefix}.conv_shortcut.weight"""] = checkpoint[F"""{old_prefix}.skip_connection.weight"""]
        new_checkpoint[F"""{new_prefix}.conv_shortcut.bias"""] = checkpoint[F"""{old_prefix}.skip_connection.bias"""]
    return new_checkpoint
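# Added mapping note: the original checkpoints store resnet blocks as in_layers
# (norm1/conv1), emb_layers (time projection) and out_layers (norm2/conv2);
# skip_connection becomes diffusers' conv_shortcut.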
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3, dim=0)
    new_checkpoint[F"""{new_prefix}.group_norm.weight"""] = checkpoint[F"""{old_prefix}.norm.weight"""]
    new_checkpoint[F"""{new_prefix}.group_norm.bias"""] = checkpoint[F"""{old_prefix}.norm.bias"""]
    new_checkpoint[F"""{new_prefix}.to_q.weight"""] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[F"""{new_prefix}.to_q.bias"""] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[F"""{new_prefix}.to_k.weight"""] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[F"""{new_prefix}.to_k.bias"""] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[F"""{new_prefix}.to_v.weight"""] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[F"""{new_prefix}.to_v.bias"""] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[F"""{new_prefix}.to_out.0.weight"""] = (
        checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[F"""{new_prefix}.to_out.0.bias"""] = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = F"""down_blocks.{i}.resnets.{j}"""
                old_prefix = F"""input_blocks.{current_layer}.0"""
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = F"""down_blocks.{i}.resnets.{j}"""
                old_prefix = F"""input_blocks.{current_layer}.0"""
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = F"""down_blocks.{i}.attentions.{j}"""
                old_prefix = F"""input_blocks.{current_layer}.1"""
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = F"""down_blocks.{i}.downsamplers.0"""
            old_prefix = F"""input_blocks.{current_layer}.0"""
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = F"""up_blocks.{i}.resnets.{j}"""
                old_prefix = F"""output_blocks.{current_layer}.0"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = F"""up_blocks.{i}.upsamplers.0"""
                old_prefix = F"""output_blocks.{current_layer-1}.1"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = F"""up_blocks.{i}.resnets.{j}"""
                old_prefix = F"""output_blocks.{current_layer}.0"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = F"""up_blocks.{i}.attentions.{j}"""
                old_prefix = F"""output_blocks.{current_layer}.1"""
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = F"""up_blocks.{i}.upsamplers.0"""
                old_prefix = F"""output_blocks.{current_layer-1}.2"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(F'''Checkpoint: {ckpt_name}''')
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 660 | 0 |
ROMAN = [
(1_0_0_0, """M"""),
(9_0_0, """CM"""),
(5_0_0, """D"""),
(4_0_0, """CD"""),
(1_0_0, """C"""),
(9_0, """XC"""),
(5_0, """L"""),
(4_0, """XL"""),
(1_0, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
def roman_to_int(roman: str) -> int:
    '''simple docstring'''
    vals = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 1_00, """D""": 5_00, """M""": 10_00}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    '''simple docstring'''
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
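# Added round-trip example:
# roman_to_int("MMXXIV") -> 2024 and int_to_roman(2024) -> "MMXXIV"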
| 12 | '''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = B'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals['''_TRAINERSPEC''']._serialized_start = 45
    _globals['''_TRAINERSPEC''']._serialized_end = 15_81
    _globals['''_TRAINERSPEC_MODELTYPE''']._serialized_start = 15_17
    _globals['''_TRAINERSPEC_MODELTYPE''']._serialized_end = 15_70
    _globals['''_NORMALIZERSPEC''']._serialized_start = 15_84
    _globals['''_NORMALIZERSPEC''']._serialized_end = 17_93
    _globals['''_SELFTESTDATA''']._serialized_start = 17_95
    _globals['''_SELFTESTDATA''']._serialized_end = 19_16
    _globals['''_SELFTESTDATA_SAMPLE''']._serialized_start = 18_64
    _globals['''_SELFTESTDATA_SAMPLE''']._serialized_end = 19_05
    _globals['''_MODELPROTO''']._serialized_start = 19_19
    _globals['''_MODELPROTO''']._serialized_end = 24_29
    _globals['''_MODELPROTO_SENTENCEPIECE''']._serialized_start = 22_08
    _globals['''_MODELPROTO_SENTENCEPIECE''']._serialized_end = 24_18
    _globals['''_MODELPROTO_SENTENCEPIECE_TYPE''']._serialized_start = 23_23
    _globals['''_MODELPROTO_SENTENCEPIECE_TYPE''']._serialized_end = 24_07
# @@protoc_insertion_point(module_scope)
| 660 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang='en_XX', tgt_lang='ro_RO', keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '<s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 10_54)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_54)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang='en_XX', tgt_lang='ro_RO', keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'], )
@slow
    def test_tokenizer_integration(self):
# fmt: off
__lowerCamelCase : str = {'input_ids': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
    checkpoint_name = 'facebook/mbart-large-50-one-to-many-mmt'
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
    @classmethod
    def setUpClass( cls ) -> None:
        cls.tokenizer : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
        cls.pad_token_id = 1
        return cls
def lowercase_ ( self ) -> str:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_00_01 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_XX'] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_00_38 )
def lowercase_ ( self ) -> Any:
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
def lowercase_ ( self ) -> Dict:
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        generated_ids = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
def lowercase_ ( self ) -> Dict:
        src_text = ['this is gunna be a long sentence ' * 20]
        assert isinstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[0] , EN_CODE )
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(len(ids ) , desired_max_length )
def lowercase_ ( self ) -> Optional[int]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_00_53, 25_00_01] )
def lowercase_ ( self ) -> Optional[int]:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def lowercase_ ( self ) -> Optional[int]:
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='pt' )
        batch["decoder_input_ids"] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def lowercase_ ( self ) -> List[Any]:
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
        batch["decoder_input_ids"] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
        input_ids = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , input_ids )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowercase_ ( self ) -> Tuple:
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='pt' )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors='pt' )
        labels = targets['input_ids']
        batch["decoder_input_ids"] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase_ ( self ) -> int:
        inputs = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
'input_ids': [[25_00_04, 62, 30_34, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_00_01,
} , )
| 13 | '''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase_ ( unittest.TestCase ):
@slow
    def test_flax_xlm_roberta_base( self ) -> None:
        """simple docstring"""
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base" )
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids )["last_hidden_state"]
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
| 660 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool( PipelineTool ):
    """simple docstring"""
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan
    inputs = ["text"]
    outputs = ["audio"]
    def setup( self ) -> None:
        if self.post_processor is None:
            self.post_processor = '''microsoft/speecht5_hifigan'''
        super().setup()
    def encode( self , text , speaker_embeddings=None ):
        inputs = self.pre_processor(text=text , return_tensors='''pt''' , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
            embeddings_dataset = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
            speaker_embeddings = torch.tensor(embeddings_dataset[7_3_0_5]['''xvector'''] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self , inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode( self , outputs ):
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
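# Illustrative usage sketch for the tool above. The import path is an assumption for
# illustration (the tool lives in the transformers agents/tools API, whose layout varies
# by version); the call simply exercises encode -> forward -> decode as defined above.
#
#   from transformers.tools import TextToSpeechTool
#
#   reader = TextToSpeechTool()
#   reader.setup()                    # resolves the default "microsoft/speecht5_hifigan" vocoder
#   waveform = reader("Hello world")  # 1-D float tensor; SpeechT5 emits 16 kHz audio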
| 14 | '''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    # Construct model: from a JSON config if one is given, otherwise with the defaults
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy (the TensorFlow checkpoint)
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
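# Example invocation of the script above (paths are hypothetical):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /tmp/gpt2_tf_ckpt \
#       --pytorch_dump_folder_path /tmp/gpt2_pytorch
#
# Omitting --gpt2_config_file (default: '') builds a default GPTaConfig, as handled above.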
| 660 | 0 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metric is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric(\'rouge\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n    >>> print(results["rouge1"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results["rouge1"].mid.fmeasure)\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
    def _info(self ) -> datasets.MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
    def _compute(self , predictions , references , rouge_types=None , use_aggregator=True , use_stemmer=False ) -> dict:
        """simple docstring"""
        if rouge_types is None:
            rouge_types = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer )
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references , predictions ):
            score = scorer.score(ref , pred )
            if use_aggregator:
                aggregator.add_scores(score )
            else:
                scores.append(score )
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
return result
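# Minimal usage sketch of the metric above, exercising the non-aggregated branch of
# `_compute` (assumes the `rouge_score` and `nltk` dependencies are installed):
#
#   import datasets
#   rouge = datasets.load_metric('rouge')
#   scores = rouge.compute(
#       predictions=['hello there'], references=['hello there'], use_aggregator=False )
#   # scores['rouge1'] is then a list with one Score(precision, recall, fmeasure) per pair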
| 15 | '''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark( key ):
    def decorator(func ):
        handle = getattr(func , "handle_key" , [] )
        handle += [key]
        setattr(func , "handle_key" , handle )
        return func
    return decorator
def mark_multiple( *keys ):
    def decorator(func ):
        handle = getattr(func , "handle_key" , [] )
        handle += keys
        setattr(func , "handle_key" , handle )
        return func
    return decorator
class KeyHandler( type ):
    def __new__( cls , name , bases , attrs ) -> type:
        """simple docstring"""
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , "key_handler" ):
            setattr(new_cls , "key_handler" , {} )
        setattr(new_cls , "handle_input" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , "handle_key" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        """simple docstring"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register( cls ):
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
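# Sketch of how the pieces above compose (the class and key below are illustrative only):
#
#   @register
#   class Menu:
#       @mark("a")
#       def on_a(cls):
#           return "pressed a"
#
#   # `register` rebuilds Menu through the KeyHandler metaclass, so the decorated method
#   # lands in `Menu.key_handler`; `Menu.handle_input()` then dispatches one keypress
#   # read via `get_character()`.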
| 660 | 0 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return PhobertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text )
        print(tokens )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens ) | 16 | '''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__snake_case : Optional[Any] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
def __init__( self , UpperCamelCase__ , UpperCamelCase__=1_6 , UpperCamelCase__=1_3 , UpperCamelCase__=7 , UpperCamelCase__=1_4 , UpperCamelCase__=1_0 , UpperCamelCase__=1_9 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=True , UpperCamelCase__=1_6 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=[1, 2, 3, 4, 5] , UpperCamelCase__=2_5 , UpperCamelCase__=5 , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = d_model
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = prediction_length
UpperCAmelCase_ = context_length
UpperCAmelCase_ = cardinality
UpperCAmelCase_ = num_time_features
UpperCAmelCase_ = lags_sequence
UpperCAmelCase_ = embedding_dimension
UpperCAmelCase_ = is_training
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = context_length
UpperCAmelCase_ = prediction_length + label_length
UpperCAmelCase_ = label_length
UpperCAmelCase_ = moving_average
UpperCAmelCase_ = autocorrelation_factor
    def get_config( self ) -> AutoformerConfig:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict( self , config ) -> Any:
        """simple docstring"""
        _past_length = config.context_length + max(config.lags_sequence )
        static_categorical_features = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        past_values = floats_tensor([self.batch_size, _past_length] )
        past_observed_mask = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        future_values = floats_tensor([self.batch_size, config.prediction_length] )
UpperCAmelCase_ = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
    def prepare_config_and_inputs( self ) -> List[str]:
        """simple docstring"""
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ) -> List[Any]:
        """simple docstring"""
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ) -> None:
        """simple docstring"""
        model = AutoformerModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = AutoformerEncoder.from_pretrained(tmpdirname ).to(torch_device )
        transformer_inputs , feature , _ , _ , _ = model.create_network_inputs(**inputs_dict )
        seasonal_input , trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        encoder_last_hidden_state_a = encoder(inputs_embeds=enc_input )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = AutoformerDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_a = decoder(
            trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class AutoformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
a_ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a_ = (AutoformerForPrediction,) if is_torch_available() else ()
a_ = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
        self.model_tester = AutoformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = model_class.from_pretrained(UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
        model_signature = inspect.signature(getattr(AutoformerModel , "forward" ) )
# The main input is the name of the argument after `self`
UpperCAmelCase_ = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCamelCase__ )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
UpperCAmelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "d_model" , UpperCamelCase__ )
UpperCAmelCase_ = getattr(self.model_tester , "num_attention_heads" , UpperCamelCase__ )
UpperCAmelCase_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
UpperCAmelCase_ = len(UpperCamelCase__ )
UpperCAmelCase_ = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# decoder attentions
UpperCAmelCase_ = outputs.decoder_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
UpperCAmelCase_ = outputs.cross_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch( filename="train-batch.pt" ):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=filename , repo_type="dataset" )
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch()
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
UpperCAmelCase_ = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
UpperCAmelCase_ = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
UpperCAmelCase_ = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCamelCase__ )
UpperCAmelCase_ = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase__ , rtol=1e-1 ) )
| 660 | 0 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config( model_name : str ) -> EfficientNetConfig:
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["""hidden_dim"""]
    config.width_coefficient = CONFIG_MAP[model_name]["""width_coef"""]
    config.depth_coefficient = CONFIG_MAP[model_name]["""depth_coef"""]
    config.image_size = CONFIG_MAP[model_name]["""image_size"""]
    config.dropout_rate = CONFIG_MAP[model_name]["""dropout_rate"""]
    config.depthwise_padding = CONFIG_MAP[model_name]["""dw_padding"""]
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    config.num_labels = 1000
    idalabel = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="""dataset""" ) ,"""r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
return config
def prepare_img( ) -> Image.Image:
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
def convert_image_processor( model_name : str ) -> EfficientNetImageProcessor:
    size = CONFIG_MAP[model_name]["""image_size"""]
    preprocessor = EfficientNetImageProcessor(
        size={"""height""": size, """width""": size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163] ,do_center_crop=False ,)
    return preprocessor
def rename_keys( original_param_names ) -> dict:
    block_names = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names ,range(num_blocks ) )}
    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = """efficientnet.""" + item[1]
    key_mapping["""predictions/kernel:0"""] = """classifier.weight"""
    key_mapping["""predictions/bias:0"""] = """classifier.bias"""
return key_mapping
def replace_params( hf_params ,tf_params ,key_mapping ) -> None:
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 ,2 ,0 ,1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 ,3 ,0 ,1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint( model_name ,pytorch_dump_folder_path ,save_model ,push_to_hub ) -> None:
    original_model = model_classes[model_name](
        include_top=True ,weights="""imagenet""" ,input_tensor=None ,input_shape=None ,pooling=None ,classes=1000 ,classifier_activation="""softmax""" ,)
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("""Converting parameters...""" )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params ,tf_params ,key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() ,return_tensors="""pt""" )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["""image_size"""]
    img = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x ,axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits ,hf_logits ,atol=1E-3 ), "The predicted logits are not the same."
    print("""Model outputs match!""" )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(f"""Pushing converted {model_name} to the hub...""" )
        repo_name = f"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(repo_name )
        hf_model.push_to_hub(repo_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
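# Example invocation (hypothetical output path; --model_name must be one of b0..b7 so the
# CONFIG_MAP and keras constructor lookups above resolve):
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model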
| 17 | '''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
    with open(emoji_file , "r" , encoding="utf-8" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as f:
        token = f.readlines()
    token = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b )] = idx
        for wd in b:
            vocab[wd] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                F"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.raw_vocab )
    def get_vocab( self ) -> dict:
        """simple docstring"""
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ) -> List[str]:
        """simple docstring"""
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def _convert_token_to_id( self , token ) -> int:
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) -> str:
        """simple docstring"""
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        out_string = "".join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(",".join(token ) + "\n" )
                index += 1
        with open(emoji_file , "w" , encoding="utf-8" ) as writer:
            json.dump(self.emoji , writer )
return vocab_file, emoji_file
class SubWordJapaneseTokenizer( object ):
    def __init__( self , vocab , ids_to_tokens , emoji ) -> None:
        """simple docstring"""
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
        self.content_repatter2 = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
        self.content_repatter3 = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
        self.content_repatter4 = re.compile(
            R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter5 = re.compile(
            R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter6 = re.compile(
            R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_transa = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ) -> int:
"""simple docstring"""
return len(self.ids_to_tokens )
    def clean_text( self , content ) -> str:
        """simple docstring"""
        content = self.content_repatter1.sub("<URL>" , content )
        content = self.content_repatter2.sub("<EMAIL>" , content )
        content = self.content_repatter3.sub("<TEL>" , content )
        content = self.content_repatter4.sub("<DATE>" , content )
        content = self.content_repatter5.sub("<DATE>" , content )
        content = self.content_repatter6.sub("<PRICE>" , content )
        content = content.translate(self.content_transa )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
        return content
    def tokenize( self , text , clean=False ) -> List[str]:
        """simple docstring"""
        text = text.replace(" " , "<SP>" )
        text = text.replace("　" , "<SP>" )
        text = text.replace("\r\n" , "<BR>" )
        text = text.replace("\n" , "<BR>" )
        text = text.replace("\r" , "<BR>" )
        text = text.replace("\t" , "<TAB>" )
        text = text.replace("—" , "ー" )
        text = text.replace("−" , "ー" )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
def check_simbol(UpperCamelCase__ ):
UpperCAmelCase_ = x.encode()
if len(UpperCamelCase__ ) == 1 and len(UpperCamelCase__ ) == 2:
UpperCAmelCase_ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc_2_a_1 and c <= 0Xc_2_b_f)
or (c >= 0Xc_7_8_0 and c <= 0Xc_7_8_3)
or (c >= 0Xc_a_b_9 and c <= 0Xc_b_b_f)
or (c >= 0Xc_c_8_0 and c <= 0Xc_d_a_2)
):
return True
return False
def checkuae(UpperCamelCase__ ):
UpperCAmelCase_ = x.encode()
if len(UpperCamelCase__ ) == 1 and len(UpperCamelCase__ ) == 3:
UpperCAmelCase_ = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe_2_8_0_8_0 and c <= 0Xe_2_b_0_7_f:
return True
return False
UpperCAmelCase_ = 0
UpperCAmelCase_ = []
while pos < len(UpperCamelCase__ ):
UpperCAmelCase_ = min(len(UpperCamelCase__ ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
UpperCAmelCase_ = [] # (token_id, token, pos)
for e in range(UpperCamelCase__ , UpperCamelCase__ , -1 ):
UpperCAmelCase_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(UpperCamelCase__ ) > 2:
UpperCAmelCase_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(UpperCamelCase__ ) > 0:
                # the candidate with the smallest token_id is adopted
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[0] )[0]
result.append(UpperCamelCase__ )
UpperCAmelCase_ = e
else:
UpperCAmelCase_ = pos + 1
UpperCAmelCase_ = text[pos:end]
if check_simbol(UpperCamelCase__ ):
result.append("<KIGOU>" )
elif checkuae(UpperCamelCase__ ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
UpperCAmelCase_ = end
return result
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__="\n" ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(UpperCamelCase__ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = "".join(UpperCamelCase__ )
return text
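# The tokenize loop above scans a window of up to maxlen characters, collects every
# vocabulary match that starts at the current position, adopts the candidate with the
# smallest token id, and falls back to byte tokens when nothing matches. A minimal,
# self-contained sketch of that core loop (toy names; no special-token, emoji, or
# symbol-range handling):
def greedy_tokenize(text, vocab, maxlen):
    pos, result = 0, []
    while pos < len(text):
        end = min(len(text), pos + maxlen + 1)
        candidates = []  # (token_id, token, end_position)
        for e in range(end, pos, -1):
            wd = text[pos:e]
            if wd in vocab:
                candidates.append((vocab[wd], wd, e))
        if candidates:
            # the candidate with the smallest token id wins
            _, wd, e = sorted(candidates, key=lambda c: c[0])[0]
            result.append(wd)
            pos = e
        else:
            # byte fallback, mirroring the "<|byte%d|>" branch above
            result.extend("<|byte%d|>" % b for b in text[pos].encode("utf-8"))
            pos += 1
    return result

assert greedy_tokenize("abc", {"ab": 0, "a": 1, "c": 2}, maxlen=2) == ["ab", "c"]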
| 660 | 0 |
'''simple docstring'''
from collections import namedtuple
_SCREAMING_SNAKE_CASE = namedtuple("from_to", "from_ to")
_SCREAMING_SNAKE_CASE = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 10_00),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.0_0454, 264.172),
"cubicyard": from_to(0.7_6455, 1.3_0795),
"cubicfoot": from_to(0.028, 35.3147),
"cup": from_to(0.0_0023_6588, 4226.75),
}
def __a(SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
if from_type not in METRIC_CONVERSION:
raise ValueError(
            F'''Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'''
+ ", ".join(SCREAMING_SNAKE_CASE_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ", ".join(SCREAMING_SNAKE_CASE_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
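# Each table entry above stores two factors around a cubic-metre pivot: from_ converts
# the source unit to cubic metres and to converts cubic metres to the target unit, so
# a conversion is value * from_ * to. A readably named re-statement of the same idea
# (the names here are illustrative; the snippet above has them obfuscated):
from collections import namedtuple

FromTo = namedtuple("FromTo", "from_ to")
CONVERSION = {"litre": FromTo(0.001, 1000), "gallon": FromTo(0.00454, 264.172)}

def convert_volume(value, from_type, to_type):
    return value * CONVERSION[from_type].from_ * CONVERSION[to_type].to

# 1 gallon -> cubic metres -> litres: 1 * 0.00454 * 1000 = 4.54
assert abs(convert_volume(1, "gallon", "litre") - 4.54) < 1e-9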
| 18 | '''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__snake_case : Union[str, Any] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def lowerCamelCase__ ( ):
UpperCAmelCase_ = Github(os.environ["GITHUB_TOKEN"] )
UpperCAmelCase_ = g.get_repo("huggingface/diffusers" )
UpperCAmelCase_ = repo.get_issues(state="open" )
for issue in open_issues:
UpperCAmelCase_ = sorted(issue.get_comments() , key=lambda A_ : i.created_at , reverse=A_ )
UpperCAmelCase_ = comments[0] if len(A_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
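# The two date conditions above implement a 23-day warning window and a 7-day grace
# period after the bot's own comment, both gated on the issue being at least 30 days
# old. A sketch of that decision with plain datetimes (names are illustrative and the
# label exemptions are omitted):
from datetime import datetime, timedelta

def stale_action(created_at, updated_at, last_commenter, now):
    old_enough = (now - created_at).days >= 30
    if old_enough and last_commenter == "github-actions[bot]" and (now - updated_at).days > 7:
        return "close"
    if old_enough and (now - updated_at).days > 23:
        return "warn"
    return "keep"

now = datetime(2023, 6, 1)
assert stale_action(now - timedelta(days=60), now - timedelta(days=24), None, now) == "warn"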
| 660 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
_a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
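# _LazyModule defers the heavy torch/tensorflow imports above until a symbol is first
# accessed. A stripped-down sketch of the same idea using module-level __getattr__
# (PEP 562); an illustration of the pattern, not transformers' actual implementation:
import importlib

_lazy_structure = {"math": ["sqrt", "floor"]}

def __getattr__(name):
    for module_name, symbols in _lazy_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)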
| 19 | '''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__snake_case : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowercase_ ( datasets.BuilderConfig ):
a_ = 1_0000
a_ = None
a_ = None
class lowercase_ ( datasets.ArrowBasedBuilder ):
a_ = ParquetConfig
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCAmelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase__ , (str, list, tuple) ):
UpperCAmelCase_ = data_files
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase_ = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(UpperCamelCase__ ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = datasets.Features.from_arrow_schema(pq.read_schema(UpperCamelCase__ ) )
break
splits.append(datasets.SplitGenerator(name=UpperCamelCase__ , gen_kwargs={"files": files} ) )
return splits
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ = table_cast(UpperCamelCase__ , self.info.features.arrow_schema )
return pa_table
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase__ ) ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = pq.ParquetFile(UpperCamelCase__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
UpperCAmelCase_ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(UpperCamelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCamelCase__ )}: {e}""" )
raise
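# _generate_tables above streams record batches so a large Parquet file is never
# materialised in memory at once. A minimal standalone equivalent (the file name and
# batch size are illustrative):
import pyarrow as pa
import pyarrow.parquet as pq

pq.write_table(pa.table({"x": list(range(10))}), "tiny.parquet")
parquet_file = pq.ParquetFile("tiny.parquet")
for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=4)):
    chunk = pa.Table.from_batches([record_batch])
    print(batch_idx, chunk.num_rows)  # 0 4, then 1 4, then 2 2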
| 660 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase_ (lowercase__ ):
snake_case =['image_processor', 'tokenizer']
snake_case ='ViltImageProcessor'
snake_case =('BertTokenizer', 'BertTokenizerFast')
def __init__( self , lowercase_=None , lowercase_=None , **lowercase_) -> Optional[Any]:
a__ =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase_ , )
a__ =kwargs.pop('feature_extractor')
a__ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(lowercase_ , lowercase_)
a__ =self.image_processor
def __call__( self , lowercase_ , lowercase_ = None , lowercase_ = True , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = True , lowercase_ = None , **lowercase_ , ) -> BatchEncoding:
a__ =self.tokenizer(
text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# add pixel_values + pixel_mask
a__ =self.image_processor(lowercase_ , return_tensors=lowercase_)
encoding.update(lowercase_)
return encoding
def __UpperCamelCase ( self , *lowercase_ , **lowercase_) -> int:
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_)
def __UpperCamelCase ( self , *lowercase_ , **lowercase_) -> Tuple:
return self.tokenizer.decode(*lowercase_ , **lowercase_)
@property
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =self.tokenizer.model_input_names
a__ =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def __UpperCamelCase ( self) -> List[str]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , )
return self.image_processor_class
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , )
return self.image_processor
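# __call__ above simply merges the text BatchEncoding with the image processor's
# pixel_values/pixel_mask. The shape of that merge, with both processors treated as
# opaque callables (a sketch of the flow, not the full keyword plumbing):
def combine(tokenizer, image_processor, text, images):
    encoding = tokenizer(text=text, return_tensors="pt")
    encoding.update(image_processor(images, return_tensors="pt"))
    return encoding  # input_ids, attention_mask, pixel_values, pixel_mask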
| 20 | '''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : Tuple = {'''vocab_file''': '''spiece.model'''}
__snake_case : Dict = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
__snake_case : Tuple = {
'''AI-Sweden/gpt-sw3-126m''': 20_48,
'''AI-Sweden/gpt-sw3-350m''': 20_48,
'''AI-Sweden/gpt-sw3-1.6b''': 20_48,
'''AI-Sweden/gpt-sw3-6.7b''': 20_48,
'''AI-Sweden/gpt-sw3-20b''': 20_48,
}
class lowercase_ ( _A ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
"""simple docstring"""
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase_ = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
UpperCAmelCase_ = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
UpperCAmelCase_ = "<|endoftext|>" if eos_token is None else eos_token
UpperCAmelCase_ = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
UpperCAmelCase_ = unk_token if pad_token is None else pad_token
UpperCAmelCase_ = eos_token if bos_token is None else bos_token
else:
UpperCAmelCase_ = "<pad>" if pad_token is None else pad_token
UpperCAmelCase_ = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
# Used for whitespace normalization in input texts
        # fmt: off
UpperCAmelCase_ = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
UpperCAmelCase_ = re.compile(
F"""[{"".join(map(UpperCamelCase__ , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" )
def __getstate__( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return len(self.sp_model )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.non_printing_characters_re.sub("" , UpperCamelCase__ )
# Normalize whitespaces
UpperCAmelCase_ = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
UpperCAmelCase_ = unicodedata.normalize("NFC" , UpperCamelCase__ )
return text
def lowerCamelCase_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.preprocess_text(UpperCamelCase__ )
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCamelCase__ )
@staticmethod
def lowerCamelCase_ ( UpperCamelCase__ ) -> str:
"""simple docstring"""
return out_string
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = ""
UpperCAmelCase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
UpperCAmelCase_ = True
UpperCAmelCase_ = []
else:
current_sub_tokens.append(UpperCamelCase__ )
UpperCAmelCase_ = False
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string
def lowerCamelCase_ ( self ) -> Dict[str, int]:
"""simple docstring"""
UpperCAmelCase_ = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
"""simple docstring"""
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = self.preprocess_text(UpperCamelCase__ )
UpperCAmelCase_ = self.sp_model.encode(UpperCamelCase__ )
else:
UpperCAmelCase_ = [self.preprocess_text(UpperCamelCase__ ) for t in text]
UpperCAmelCase_ = self.sp_model.encode(UpperCamelCase__ )
if return_tensors is True or return_tensors == "pt":
UpperCAmelCase_ = torch.tensor(UpperCamelCase__ )
return token_ids
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
return self.sp_model.decode(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
UpperCAmelCase_ = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(UpperCamelCase__ ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=UpperCamelCase__ )
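# The chat-formatting method above joins turns as
# "<eos><bos>User: ...<bos>Bot: ...<bos>Bot:" before encoding. A string-level sketch
# with the default special tokens, independent of SentencePiece:
def build_prompt(turns, bos="<s>", eos="<|endoftext|>"):
    texts = [f"User: {t}" if is_user else f"Bot: {t}" for is_user, t in turns]
    return f"{eos}{bos}" + f"{bos}".join(texts) + f"{bos}Bot:"

assert build_prompt([(True, "Hi")]) == "<|endoftext|><s>User: Hi<s>Bot:"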
| 660 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
UpperCAmelCase_ : str = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
UpperCAmelCase_ : Union[str, Any] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class __A ( UpperCamelCase__ ):
UpperCamelCase = """whisper"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Any , __snake_case :Any=5_18_65 , __snake_case :Optional[int]=80 , __snake_case :str=6 , __snake_case :List[str]=4 , __snake_case :List[Any]=6 , __snake_case :Any=4 , __snake_case :Union[str, Any]=15_36 , __snake_case :List[Any]=15_36 , __snake_case :str=0.0 , __snake_case :Optional[Any]=0.0 , __snake_case :int=5_02_57 , __snake_case :Optional[int]=True , __snake_case :List[str]=True , __snake_case :str="gelu" , __snake_case :Optional[Any]=2_56 , __snake_case :Union[str, Any]=0.0 , __snake_case :Any=0.0 , __snake_case :Optional[int]=0.0 , __snake_case :str=0.02 , __snake_case :Union[str, Any]=False , __snake_case :int=15_00 , __snake_case :List[Any]=4_48 , __snake_case :Optional[int]=5_02_56 , __snake_case :Tuple=5_02_56 , __snake_case :Any=5_02_56 , __snake_case :Dict=None , __snake_case :int=[2_20, 5_02_56] , __snake_case :List[Any]=False , __snake_case :Optional[Any]=2_56 , __snake_case :Tuple=False , __snake_case :int=0.05 , __snake_case :List[Any]=10 , __snake_case :Optional[int]=2 , __snake_case :Tuple=0.0 , __snake_case :Union[str, Any]=10 , __snake_case :Dict=0 , __snake_case :List[Any]=7 , **__snake_case :List[str] , ):
'''simple docstring'''
__magic_name__ : Any =vocab_size
__magic_name__ : Optional[Any] =num_mel_bins
__magic_name__ : Union[str, Any] =d_model
__magic_name__ : List[Any] =encoder_layers
__magic_name__ : str =encoder_attention_heads
__magic_name__ : List[str] =decoder_layers
__magic_name__ : List[str] =decoder_attention_heads
__magic_name__ : int =decoder_ffn_dim
__magic_name__ : Tuple =encoder_ffn_dim
__magic_name__ : Tuple =dropout
__magic_name__ : List[Any] =attention_dropout
__magic_name__ : Any =activation_dropout
__magic_name__ : List[Any] =activation_function
__magic_name__ : Union[str, Any] =init_std
__magic_name__ : Tuple =encoder_layerdrop
__magic_name__ : Optional[Any] =decoder_layerdrop
__magic_name__ : List[str] =use_cache
__magic_name__ : Any =encoder_layers
__magic_name__ : str =scale_embedding # scale factor will be sqrt(d_model) if True
__magic_name__ : Optional[int] =max_source_positions
__magic_name__ : str =max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__magic_name__ : Any =classifier_proj_size
__magic_name__ : Tuple =use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__magic_name__ : Any =apply_spec_augment
__magic_name__ : Optional[Any] =mask_time_prob
__magic_name__ : str =mask_time_length
__magic_name__ : List[str] =mask_time_min_masks
__magic_name__ : List[str] =mask_feature_prob
__magic_name__ : Dict =mask_feature_length
__magic_name__ : Dict =mask_feature_min_masks
__magic_name__ : Tuple =median_filter_width
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , suppress_tokens=__snake_case , begin_suppress_tokens=__snake_case , **__snake_case , )
class __A ( UpperCamelCase__ ):
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : int =OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
__magic_name__ : int ={0: """batch"""}
else:
__magic_name__ : Union[str, Any] ={0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__snake_case , direction="""inputs""" )
return common_inputs
def A__ ( self :List[str] , __snake_case :Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __snake_case :int = -1 , __snake_case :int = -1 , __snake_case :bool = False , __snake_case :Optional["TensorType"] = None , __snake_case :int = 2_20_50 , __snake_case :float = 5.0 , __snake_case :int = 2_20 , ):
'''simple docstring'''
__magic_name__ : List[Any] =OrderedDict()
__magic_name__ : Optional[int] =OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=__snake_case , framework=__snake_case , sampling_rate=__snake_case , time_duration=__snake_case , frequency=__snake_case , )
__magic_name__ : List[Any] =encoder_inputs["""input_features"""].shape[2]
__magic_name__ : Tuple =encoder_sequence_length // 2 if self.use_past else seq_length
__magic_name__ : Union[str, Any] =super().generate_dummy_inputs(
preprocessor.tokenizer , __snake_case , __snake_case , __snake_case , __snake_case )
__magic_name__ : Optional[Any] =encoder_inputs.pop("""input_features""" )
__magic_name__ : Dict =decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
__magic_name__ : Optional[int] =decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return 1E-3
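# attribute_map above aliases generic config names onto Whisper-specific ones, so
# config.hidden_size transparently reads d_model. A toy version of that mechanism
# (an illustrative class, not the PretrainedConfig implementation):
class AliasedConfig:
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads"}

    def __init__(self, d_model=256, encoder_attention_heads=4):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads

    def __getattr__(self, name):  # only reached when normal lookup fails
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

assert AliasedConfig().hidden_size == 256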
| 21 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowercase_ ( unittest.TestCase ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=7 , UpperCamelCase__=3 , UpperCamelCase__=1_8 , UpperCamelCase__=3_0 , UpperCamelCase__=4_0_0 , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=[0.5, 0.5, 0.5] , UpperCamelCase__=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 1_8}
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
def lowerCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase_ ( _A , unittest.TestCase ):
a_ = LevitImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = LevitImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 660 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class A ( _a ):
lowercase_ = 'Wav2Vec2FeatureExtractor'
lowercase_ = 'AutoTokenizer'
def __init__( self : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
_a = self.feature_extractor
_a = False
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Optional[Any] ) -> Tuple:
"""simple docstring"""
try:
return super().from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
except OSError:
warnings.warn(
F'Loading a tokenizer inside {cls.__name__} from a config that does not'
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , lowerCAmelCase_ , )
_a = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = WavaVecaCTCTokenizer.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
return cls(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
def __call__( self : str , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Any ) -> Optional[int]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase_ , **lowerCAmelCase_ )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
_a = kwargs.pop('''raw_speech''' )
else:
_a = kwargs.pop('''audio''' , lowerCAmelCase_ )
_a = kwargs.pop('''sampling_rate''' , lowerCAmelCase_ )
_a = kwargs.pop('''text''' , lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
_a = args[0]
_a = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
_a = self.feature_extractor(lowerCAmelCase_ , *lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , **lowerCAmelCase_ )
if text is not None:
_a = self.tokenizer(lowerCAmelCase_ , **lowerCAmelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_a = encodings['''input_ids''']
return inputs
def __lowerCAmelCase ( self : str , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Any ) -> Any:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*lowerCAmelCase_ , **lowerCAmelCase_ )
_a = kwargs.pop('''input_features''' , lowerCAmelCase_ )
_a = kwargs.pop('''labels''' , lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
_a = args[0]
_a = args[1:]
if input_features is not None:
_a = self.feature_extractor.pad(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ )
if labels is not None:
_a = self.tokenizer.pad(lowerCAmelCase_ , **lowerCAmelCase_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_a = labels['''input_ids''']
return input_features
def __lowerCAmelCase ( self : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[Any] ) -> List[str]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@contextmanager
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
_a = True
_a = self.tokenizer
yield
_a = self.feature_extractor
_a = False
| 22 | '''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 100 , ):
UpperCAmelCase_ = x_start
UpperCAmelCase_ = fnc(A_ )
UpperCAmelCase_ = 0.0
for _ in range(A_ ):
        # Approximate the curve as a sequence of straight line segments and sum their lengths
UpperCAmelCase_ = (x_end - x_start) / steps + xa
UpperCAmelCase_ = fnc(A_ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
UpperCAmelCase_ = xa
UpperCAmelCase_ = fxa
return length
if __name__ == "__main__":
def lowerCamelCase__ ( A_ ):
return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
__snake_case : List[Any] = 10
while i <= 10_00_00:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
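# Each iteration above advances x by (x_end - x_start) / steps and accumulates
# math.hypot(dx, dy) = sqrt(dx**2 + dy**2), the length of one polyline chord. For
# f(x) = x on [0, 1] every chord lies exactly on the curve, so any step count should
# return sqrt(2) (assuming the approximator is named line_length, as in the demo):
import math
assert abs(line_length(lambda x: x, 0, 1, 10) - math.sqrt(2)) < 1e-9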
| 660 | 0 |
def _snake_case (__lowercase):
UpperCamelCase_ = False
while is_sorted is False: # Until all the indices are traversed keep looping
UpperCamelCase_ = True
for i in range(0 , len(__lowercase) - 1 , 2): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
UpperCamelCase_ , UpperCamelCase_ = input_list[i + 1], input_list[i]
# swapping if elements not in order
UpperCamelCase_ = False
for i in range(1 , len(__lowercase) - 1 , 2): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
UpperCamelCase_ , UpperCamelCase_ = input_list[i + 1], input_list[i]
# swapping if elements not in order
UpperCamelCase_ = False
return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
snake_case__ : Union[str, Any] = [int(x) for x in input().split()]
# inputing elements of the list in one line
snake_case__ : Dict = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
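# Odd-even (brick) transposition sort alternates compare-and-swap passes over even
# and odd index pairs until one full double pass makes no swap, so its output must
# agree with sorted(). A quick randomized check (using the odd_even_sort name from
# the demo lines above):
import random
data = random.sample(range(100), 15)
assert odd_even_sort(list(data)) == sorted(data)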
| 23 | '''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowercase_ ( _A ):
a_ = """"""
a_ = """hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Tuple:
"""simple docstring"""
super().__init__(self , **UpperCamelCase__ )
UpperCAmelCase_ = repo_info
UpperCAmelCase_ = token
UpperCAmelCase_ = None
def lowerCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
if self.dir_cache is None:
UpperCAmelCase_ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase_ = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(UpperCamelCase__ ): {"name": str(UpperCamelCase__ ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = "rb" , **UpperCamelCase__ , ) -> Optional[int]:
"""simple docstring"""
if not isinstance(self.repo_info , UpperCamelCase__ ):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
UpperCAmelCase_ = hf_hub_url(self.repo_info.id , UpperCamelCase__ , revision=self.repo_info.sha )
return fsspec.open(
UpperCamelCase__ , mode=UpperCamelCase__ , headers=get_authentication_headers_for_url(UpperCamelCase__ , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCamelCase_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
self._get_dirs()
UpperCAmelCase_ = self._strip_protocol(UpperCamelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__=False , **UpperCamelCase__ ) -> str:
"""simple docstring"""
self._get_dirs()
UpperCAmelCase_ = PurePosixPath(path.strip("/" ) )
UpperCAmelCase_ = {}
for p, f in self.dir_cache.items():
UpperCAmelCase_ = PurePosixPath(p.strip("/" ) )
UpperCAmelCase_ = p.parent
if root == path:
UpperCAmelCase_ = f
UpperCAmelCase_ = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
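# _get_dirs above expands a flat list of repo file paths into directory entries by
# walking each path's parents. A standalone sketch of that expansion:
from pathlib import PurePosixPath

def build_dir_cache(filenames):
    cache = {name: {"name": name, "type": "file"} for name in filenames}
    for name in filenames:
        for parent in list(PurePosixPath(name).parents)[:-1]:
            cache[str(parent)] = {"name": str(parent), "type": "directory"}
    return cache

cache = build_dir_cache(["data/train.csv", "README.md"])
assert cache["data"]["type"] == "directory"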
| 660 | 0 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase ( __lowerCAmelCase):
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''num_heads''' ) )
class lowerCAmelCase :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[16, 48, 96] , __SCREAMING_SNAKE_CASE=[1, 3, 6] , __SCREAMING_SNAKE_CASE=[1, 2, 10] , __SCREAMING_SNAKE_CASE=[7, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 1, 1] , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=[False, False, True] , __SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-12 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , ) -> Optional[Any]:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_sizes
__snake_case = patch_stride
__snake_case = patch_padding
__snake_case = is_training
__snake_case = use_labels
__snake_case = num_labels
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = num_heads
__snake_case = stride_kv
__snake_case = depth
__snake_case = cls_token
__snake_case = attention_drop_rate
__snake_case = initializer_range
__snake_case = layer_norm_eps
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__snake_case = CvtModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__snake_case = model(__SCREAMING_SNAKE_CASE )
__snake_case = (self.image_size, self.image_size)
__snake_case , __snake_case = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__snake_case = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__snake_case = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__snake_case = self.num_labels
__snake_case = CvtForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__snake_case = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase):
__lowercase : int = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
__lowercase : Union[str, Any] = (
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
__lowercase : Union[str, Any] = False
__lowercase : Optional[int] = False
__lowercase : Union[str, Any] = False
__lowercase : Optional[int] = False
__lowercase : str = False
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = CvtModelTester(self )
__snake_case = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(__SCREAMING_SNAKE_CASE )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__snake_case = outputs.hidden_states
__snake_case = len(self.model_tester.depth )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
pass
@slow
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = CvtModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _UpperCamelCase ()-> str:
'''simple docstring'''
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase):
@cached_property
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__SCREAMING_SNAKE_CASE )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__snake_case = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__snake_case = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
__snake_case = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
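# The expected spatial size in create_and_check_model follows the standard
# convolution output formula, floor((size + 2*padding - kernel) / stride) + 1,
# applied once per stage. A worked check with the tester defaults above
# (image 64, kernels [7, 3, 3], strides [4, 2, 2], paddings [2, 1, 1]):
from math import floor

size = 64
for kernel, stride, pad in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    size = floor((size + 2 * pad - kernel) / stride) + 1
assert size == 4  # 64 -> 16 -> 8 -> 4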
| 24 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Union[str, Any] = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 660 | 0 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def lowerCamelCase__ ( _a , _a , _a):
SCREAMING_SNAKE_CASE : Optional[Any] = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
SCREAMING_SNAKE_CASE : str = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
SCREAMING_SNAKE_CASE : str = f"{src_lang}-{tgt_lang}"
    SCREAMING_SNAKE_CASE : Tuple = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation.\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(_a , exist_ok=_a)
SCREAMING_SNAKE_CASE : Any = os.path.join(_a , "README.md")
print(f"Generating {path}")
with open(_a , "w" , encoding="utf-8") as f:
f.write(_a)
# make sure we are under the root of the project
a_ = Path(__file__).resolve().parent.parent.parent
a_ = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ , a_ , a_ = model_name.split('-')
a_ = model_cards_dir / 'facebook' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
 | 25 | '''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
class lowercase_ ( _A ):
a_ = """linear"""
a_ = """cosine"""
a_ = """cosine_with_restarts"""
a_ = """polynomial"""
a_ = """constant"""
a_ = """constant_with_warmup"""
a_ = """piecewise_constant"""
def lowerCamelCase__ ( A_ , A_ = -1 ):
return LambdaLR(A_ , lambda A_ : 1 , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1.0 , A_ ) )
return 1.0
return LambdaLR(A_ , A_ , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ = -1 ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = step_rules.split("," )
for rule_str in rule_list[:-1]:
UpperCAmelCase_ , UpperCAmelCase_ = rule_str.split(":" )
UpperCAmelCase_ = int(A_ )
UpperCAmelCase_ = float(A_ )
UpperCAmelCase_ = value
UpperCAmelCase_ = float(rule_list[-1] )
def create_rules_function(A_ , A_ ):
def rule_func(A_ ) -> float:
UpperCAmelCase_ = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(A_ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
UpperCAmelCase_ = create_rules_function(A_ , A_ )
return LambdaLR(A_ , A_ , last_epoch=A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=-1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 0.5 , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(A_ ) * 2.0 * progress )) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ = 1 , A_ = -1 ):
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
UpperCAmelCase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(A_ ) * progress) % 1.0) )) )
return LambdaLR(A_ , A_ , A_ )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=1e-7 , A_=1.0 , A_=-1 ):
UpperCAmelCase_ = optimizer.defaults["lr"]
if not (lr_init > lr_end):
        raise ValueError(F"""lr_end ({lr_end}) must be smaller than the initial lr ({lr_init})""" )
def lr_lambda(A_ ):
if current_step < num_warmup_steps:
return float(A_ ) / float(max(1 , A_ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
UpperCAmelCase_ = lr_init - lr_end
UpperCAmelCase_ = num_training_steps - num_warmup_steps
UpperCAmelCase_ = 1 - (current_step - num_warmup_steps) / decay_steps
UpperCAmelCase_ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(A_ , A_ , A_ )
__snake_case : str = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCamelCase__ ( A_ , A_ , A_ = None , A_ = None , A_ = None , A_ = 1 , A_ = 1.0 , A_ = -1 , ):
UpperCAmelCase_ = SchedulerType(A_ )
UpperCAmelCase_ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(A_ , last_epoch=A_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(A_ , step_rules=A_ , last_epoch=A_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(A_ , num_warmup_steps=A_ , last_epoch=A_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , num_cycles=A_ , last_epoch=A_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , power=A_ , last_epoch=A_ , )
return schedule_func(
A_ , num_warmup_steps=A_ , num_training_steps=A_ , last_epoch=A_ )
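# Usage sketch (hedged: in the un-obfuscated source the dispatcher defined last
# above is `get_scheduler`; the model, optimizer, and step counts below are
# illustrative, not part of this module):
#
#   import torch
#   net = torch.nn.Linear(4, 2)
#   opt = torch.optim.AdamW(net.parameters(), lr=1e-3)
#   sched = get_scheduler("linear", opt, num_warmup_steps=10, num_training_steps=100)
#   for _ in range(100):
#       opt.step()
#       sched.step()  # LambdaLR rescales the base lr by the schedule factor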
| 660 | 0 |
'''simple docstring'''
import math
import sys
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : List[str] = """"""
try:
with open(_lowerCamelCase , """rb""" ) as binary_file:
__snake_case : Optional[Any] = binary_file.read()
for dat in data:
__snake_case : Union[str, Any] = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : int = {"""0""": """0""", """1""": """1"""}
__snake_case , __snake_case : List[str] = """""", """"""
__snake_case : Dict = len(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__snake_case : str = lexicon[curr_string]
result += last_match_id
__snake_case : List[str] = last_match_id + """0"""
if math.loga(_lowerCamelCase ).is_integer():
__snake_case : Optional[int] = {}
for curr_key in list(_lowerCamelCase ):
__snake_case : Optional[Any] = lexicon.pop(_lowerCamelCase )
__snake_case : Optional[int] = new_lex
__snake_case : List[Any] = last_match_id + """1"""
index += 1
__snake_case : List[Any] = """"""
return result
def _a ( _lowerCamelCase , _lowerCamelCase ) -> None:
"""simple docstring"""
__snake_case : Optional[Any] = 8
try:
with open(_lowerCamelCase , """wb""" ) as opened_file:
__snake_case : Any = [
to_write[i : i + byte_length]
for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_lowerCamelCase , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : List[Any] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
__snake_case : Optional[int] = data_bits[counter:]
__snake_case : int = data_bits[counter + 1 :]
return data_bits
def _a ( _lowerCamelCase , _lowerCamelCase ) -> None:
"""simple docstring"""
__snake_case : Union[str, Any] = read_file_binary(_lowerCamelCase )
__snake_case : Optional[int] = remove_prefix(_lowerCamelCase )
__snake_case : int = decompress_data(_lowerCamelCase )
write_file_binary(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
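# CLI sketch (hedged: file names are placeholders and the script name is assumed;
# the __main__ guard above expects a compressed input path then an output path):
#
#   python lzw_decompress.py compressed.bin restored.bin
#
# Pipeline: read the raw bits, strip the prefix written at compression time,
# rebuild the LZW lexicon while decoding, then write the result out byte by byte.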
| 26 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case : Optional[int] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__snake_case : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
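# The try/except blocks above follow a common guarded-import pattern; a minimal
# standalone sketch of the same idea (the "foo" names are illustrative only):
#
#   try:
#       if not is_torch_available():
#           raise OptionalDependencyNotAvailable()
#   except OptionalDependencyNotAvailable:
#       pass  # torch-backed objects are simply left out of _import_structure
#   else:
#       _import_structure["modeling_foo"] = ["FooModel"]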
| 660 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : Dict = logging.get_logger(__name__)
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_A = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
_A = [144, 192, 240]
_A = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
_A = [96, 120, 144]
_A = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
_A = [64, 80, 96]
_A = [16, 16, 24, 48, 64, 80, 320]
_A = 0.05
_A = 2.0
if mobilevit_name.startswith('deeplabv3_' ):
_A = 512
_A = 16
_A = 21
_A = 'pascal-voc-id2label.json'
else:
_A = 1_000
_A = 'imagenet-1k-id2label.json'
_A = 'huggingface/label-files'
_A = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
_A = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
return config
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple:
"""simple docstring"""
for i in range(1 , 6 ):
if F"layer_{i}." in name:
_A = name.replace(F"layer_{i}." , F"encoder.layer.{i - 1}." )
if "conv_1." in name:
_A = name.replace('conv_1.' , 'conv_stem.' )
if ".block." in name:
_A = name.replace('.block.' , '.' )
if "exp_1x1" in name:
_A = name.replace('exp_1x1' , 'expand_1x1' )
if "red_1x1" in name:
_A = name.replace('red_1x1' , 'reduce_1x1' )
if ".local_rep.conv_3x3." in name:
_A = name.replace('.local_rep.conv_3x3.' , '.conv_kxk.' )
if ".local_rep.conv_1x1." in name:
_A = name.replace('.local_rep.conv_1x1.' , '.conv_1x1.' )
if ".norm." in name:
_A = name.replace('.norm.' , '.normalization.' )
if ".conv." in name:
_A = name.replace('.conv.' , '.convolution.' )
if ".conv_proj." in name:
_A = name.replace('.conv_proj.' , '.conv_projection.' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F".{i}.{j}." in name:
_A = name.replace(F".{i}.{j}." , F".{i}.layer.{j}." )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F".{i}.{j}." in name:
_A = name.replace(F".{i}.{j}." , F".{i}." )
if "expand_1x1" in name:
_A = name.replace('expand_1x1' , 'downsampling_layer.expand_1x1' )
if "conv_3x3" in name:
_A = name.replace('conv_3x3' , 'downsampling_layer.conv_3x3' )
if "reduce_1x1" in name:
_A = name.replace('reduce_1x1' , 'downsampling_layer.reduce_1x1' )
for i in range(2 , 5 ):
if F".global_rep.{i}.weight" in name:
_A = name.replace(F".global_rep.{i}.weight" , '.layernorm.weight' )
if F".global_rep.{i}.bias" in name:
_A = name.replace(F".global_rep.{i}.bias" , '.layernorm.bias' )
if ".global_rep." in name:
_A = name.replace('.global_rep.' , '.transformer.' )
if ".pre_norm_mha.0." in name:
_A = name.replace('.pre_norm_mha.0.' , '.layernorm_before.' )
if ".pre_norm_mha.1.out_proj." in name:
_A = name.replace('.pre_norm_mha.1.out_proj.' , '.attention.output.dense.' )
if ".pre_norm_ffn.0." in name:
_A = name.replace('.pre_norm_ffn.0.' , '.layernorm_after.' )
if ".pre_norm_ffn.1." in name:
_A = name.replace('.pre_norm_ffn.1.' , '.intermediate.dense.' )
if ".pre_norm_ffn.4." in name:
_A = name.replace('.pre_norm_ffn.4.' , '.output.dense.' )
if ".transformer." in name:
_A = name.replace('.transformer.' , '.transformer.layer.' )
if ".aspp_layer." in name:
_A = name.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in name:
_A = name.replace('.aspp_pool.' , '.' )
if "seg_head." in name:
_A = name.replace('seg_head.' , 'segmentation_head.' )
if "segmentation_head.classifier.classifier." in name:
_A = name.replace('segmentation_head.classifier.classifier.' , 'segmentation_head.classifier.' )
if "classifier.fc." in name:
_A = name.replace('classifier.fc.' , 'classifier.' )
elif (not base_model) and ("segmentation_head." not in name):
_A = 'mobilevit.' + name
return name
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> int:
"""simple docstring"""
if base_model:
_A = ''
else:
_A = 'mobilevit.'
for key in orig_state_dict.copy().keys():
_A = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if key[:8] == "encoder.":
_A = key[8:]
if "qkv" in key:
_A = key.split('.' )
_A = int(key_split[0][6:] ) - 1
_A = int(key_split[3] )
_A = model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
_A = layer.transformer.layer[transformer_num].attention.attention.all_head_size
_A = (
F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
)
if "weight" in key:
_A = val[:dim, :]
_A = val[dim : dim * 2, :]
_A = val[-dim:, :]
else:
_A = val[:dim]
_A = val[dim : dim * 2]
_A = val[-dim:]
else:
_A = val
return orig_state_dict
def __lowerCAmelCase( ) -> Tuple:
"""simple docstring"""
_A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_A = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str:
"""simple docstring"""
_A = get_mobilevit_config(_SCREAMING_SNAKE_CASE )
# load original state_dict
_A = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
# load 🤗 model
if mobilevit_name.startswith('deeplabv3_' ):
_A = MobileViTForSemanticSegmentation(_SCREAMING_SNAKE_CASE ).eval()
else:
_A = MobileViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
_A = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileViTImageProcessor
_A = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_A = image_processor(images=prepare_img() , return_tensors='pt' )
_A = model(**_SCREAMING_SNAKE_CASE )
_A = outputs.logits
if mobilevit_name.startswith('deeplabv3_' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
_A = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
_A = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
_A = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
_A = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
_A = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
_A = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
_A = {
'mobilevit_s': 'mobilevit-small',
'mobilevit_xs': 'mobilevit-x-small',
'mobilevit_xxs': 'mobilevit-xx-small',
'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
}
print('Pushing to the hub...' )
_A = model_mapping[mobilevit_name]
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization='apple' )
model.push_to_hub(_SCREAMING_SNAKE_CASE , organization='apple' )
if __name__ == "__main__":
__A : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
help=(
"Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A : str = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
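# Invocation sketch (hedged: the script name is assumed from its contents and all
# paths are placeholders; valid --mobilevit_name values are listed in the help above):
#
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small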
| 27 | '''simple docstring'''
import csv
import tweepy
# Twitter API credentials
__snake_case : Union[str, Any] = ''''''
__snake_case : List[Any] = ''''''
__snake_case : List[str] = ''''''
__snake_case : Any = ''''''
def lowerCamelCase__ ( A_ ):
# authorize twitter, initialize tweepy
UpperCAmelCase_ = tweepy.OAuthHandler(A_ , A_ )
auth.set_access_token(A_ , A_ )
UpperCAmelCase_ = tweepy.API(A_ )
# initialize a list to hold all the tweepy Tweets
UpperCAmelCase_ = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCAmelCase_ = api.user_timeline(screen_name=A_ , count=200 )
# save most recent tweets
alltweets.extend(A_ )
# save the id of the oldest tweet less one
UpperCAmelCase_ = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(A_ ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
UpperCAmelCase_ = api.user_timeline(
screen_name=A_ , count=200 , max_id=A_ )
# save most recent tweets
alltweets.extend(A_ )
# update the id of the oldest tweet less one
UpperCAmelCase_ = alltweets[-1].id - 1
print(F"""...{len(A_ )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCAmelCase_ = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , "w" ) as f:
UpperCAmelCase_ = csv.writer(A_ )
writer.writerow(["id", "created_at", "text"] )
writer.writerows(A_ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
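# Read-back sketch (hedged: assumes the script above already produced the CSV for
# this screen name; the header row matches writer.writerow above):
#
#   import csv
#   with open("new_FirePing32_tweets.csv") as f:
#       rows = list(csv.reader(f))
#   print(rows[0])  # ['id', 'created_at', 'text']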
| 660 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
self.test()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Optional[int] = False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE : str = self.advance()
if not self.does_advance(A ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.update(A )
counter += 1
if counter > 10_000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def UpperCamelCase_ ( self ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self, A=False ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
super(A, self ).__init__()
if not isinstance(A, A ) or len(A ) == 0:
raise ValueError(F"`token_ids` has to be a non-empty list, but is {token_ids}." )
if any((not isinstance(A, A ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}." )
SCREAMING_SNAKE_CASE : Tuple = token_ids
SCREAMING_SNAKE_CASE : Tuple = len(self.token_ids )
SCREAMING_SNAKE_CASE : Optional[int] = -1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE : Any = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(A )}" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(A )}" )
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : int = False
if self.does_advance(A ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE : int = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE : int = True
self.reset()
return stepped, completed, reset
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : Dict = 0
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCamelCase_ ( self, A=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : str = self.seqlen
SCREAMING_SNAKE_CASE : List[Any] = self.fulfilled_idx
SCREAMING_SNAKE_CASE : List[Any] = self.completed
return new_constraint
class _a :
'''simple docstring'''
def __init__( self, A, A=True ):
'''simple docstring'''
        SCREAMING_SNAKE_CASE : Any = max([len(one ) for one in nested_token_ids] )
SCREAMING_SNAKE_CASE : Optional[int] = {}
for token_ids in nested_token_ids:
SCREAMING_SNAKE_CASE : Dict = root
for tidx, token_id in enumerate(A ):
if token_id not in level:
SCREAMING_SNAKE_CASE : Dict = {}
SCREAMING_SNAKE_CASE : Optional[Any] = level[token_id]
if no_subsets and self.has_subsets(A, A ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F" {nested_token_ids}." )
SCREAMING_SNAKE_CASE : Any = root
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.trie
for current_token in current_seq:
SCREAMING_SNAKE_CASE : Optional[Any] = start[current_token]
SCREAMING_SNAKE_CASE : Optional[Any] = list(start.keys() )
return next_tokens
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.next_tokens(A )
return len(A ) == 0
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = list(root.values() )
if len(A ) == 0:
return 1
else:
return sum([self.count_leaves(A ) for nn in next_nodes] )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.count_leaves(A )
return len(A ) != leaf_count
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
super(A, self ).__init__()
if not isinstance(A, A ) or len(A ) == 0:
raise ValueError(F"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." )
if any(not isinstance(A, A ) for token_ids in nested_token_ids ):
raise ValueError(F"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." )
if any(
any((not isinstance(A, A ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." )
SCREAMING_SNAKE_CASE : Dict = DisjunctiveTrie(A )
SCREAMING_SNAKE_CASE : int = nested_token_ids
SCREAMING_SNAKE_CASE : int = self.trie.max_height
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq )
if len(A ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(A )}" )
SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(A )}" )
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
if self.does_advance(A ):
self.current_seq.append(A )
SCREAMING_SNAKE_CASE : Tuple = True
else:
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
SCREAMING_SNAKE_CASE : int = self.trie.reached_leaf(self.current_seq )
SCREAMING_SNAKE_CASE : List[str] = completed
return stepped, completed, reset
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : Optional[int] = []
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCamelCase_ ( self, A=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : Tuple = self.seqlen
SCREAMING_SNAKE_CASE : Dict = self.current_seq
SCREAMING_SNAKE_CASE : str = self.completed
return new_constraint
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = constraints
# max # of steps required to fulfill a given constraint
SCREAMING_SNAKE_CASE : List[str] = max([c.seqlen for c in constraints] )
SCREAMING_SNAKE_CASE : str = len(A )
SCREAMING_SNAKE_CASE : Any = False
self.init_state()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : str = [constraint.copy(stateful=A ) for constraint in self.constraints]
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE : List[str] = constraint.advance()
if isinstance(A, A ):
token_list.append(A )
elif isinstance(A, A ):
token_list.extend(A )
else:
SCREAMING_SNAKE_CASE : List[Any] = self.inprogress_constraint.advance()
if isinstance(A, A ):
token_list.append(A )
elif isinstance(A, A ):
token_list.extend(A )
if len(A ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.add(A )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError(F"`token_id` should be an `int`, but is `{token_id}`." )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = False, False
if self.completed:
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Dict = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.inprogress_constraint.update(A )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A ) )
SCREAMING_SNAKE_CASE : Dict = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
SCREAMING_SNAKE_CASE : Any = None
if len(self.pending_constraints ) == 0:
# we're done!
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(A ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = pending_constraint.update(A )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(A )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if not complete and stepped:
SCREAMING_SNAKE_CASE : Union[str, Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
SCREAMING_SNAKE_CASE : Any = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
SCREAMING_SNAKE_CASE : Dict = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCamelCase_ ( self, A=True ):
'''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = ConstraintListState(self.constraints ) # we never mutate the self.constraints objects
        # throughout this process, so they remain in their initialization state.
if stateful:
SCREAMING_SNAKE_CASE : int = [
constraint.copy(stateful=A ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.copy(stateful=A )
SCREAMING_SNAKE_CASE : Optional[Any] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
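# Usage sketch (hedged: the class names refer to the un-obfuscated originals that
# the three constraint classes above correspond to; the token ids are made up):
#
#   constraint = PhrasalConstraint([5, 6, 7])
#   state = ConstraintListState([constraint])
#   for tok in (5, 6, 7):
#       state.add(tok)        # steps the in-progress constraint one token
#   assert state.completed    # the whole phrase was generated in order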
| 28 | '''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__snake_case : int = logging.get_logger(__name__)
class lowercase_ ( _A ):
def __init__( self , **UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
requires_backends(self , ["bs4"] )
super().__init__(**UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCAmelCase_ = parent.find_all(child.name , recursive=UpperCamelCase__ )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(UpperCamelCase__ ) else next(i for i, s in enumerate(UpperCamelCase__ , 1 ) if s is child ) )
UpperCAmelCase_ = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = BeautifulSoup(UpperCamelCase__ , "html.parser" )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for element in html_code.descendants:
if type(UpperCamelCase__ ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
UpperCAmelCase_ = html.unescape(UpperCamelCase__ ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = self.xpath_soup(UpperCamelCase__ )
stringaxtag_seq.append(UpperCamelCase__ )
stringaxsubs_seq.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("Number of doc strings and xtags does not correspond" )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("Number of doc strings and xsubs does not correspond" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = ""
for tagname, subs in zip(UpperCamelCase__ , UpperCamelCase__ ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self , UpperCamelCase__ ) -> BatchFeature:
"""simple docstring"""
UpperCAmelCase_ = False
# Check that strings has a valid type
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = True
elif isinstance(UpperCamelCase__ , (list, tuple) ):
if len(UpperCamelCase__ ) == 0 or isinstance(html_strings[0] , UpperCamelCase__ ):
UpperCAmelCase_ = True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `List[str]` (batch of examples), "
F"""but is of type {type(UpperCamelCase__ )}.""" )
UpperCAmelCase_ = bool(isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(html_strings[0] , UpperCamelCase__ )) )
if not is_batched:
UpperCAmelCase_ = [html_strings]
# Get nodes + xpaths
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for html_string in html_strings:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.get_three_from_single(UpperCamelCase__ )
nodes.append(UpperCamelCase__ )
UpperCAmelCase_ = []
for node, tag_list, sub_list in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = self.construct_xpath(UpperCamelCase__ , UpperCamelCase__ )
xpath_strings.append(UpperCamelCase__ )
xpaths.append(UpperCamelCase__ )
# return as Dict
UpperCAmelCase_ = {"nodes": nodes, "xpaths": xpaths}
UpperCAmelCase_ = BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
return encoded_inputs
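# Usage sketch (hedged: in the un-obfuscated source the class above is
# MarkupLMFeatureExtractor and bs4 must be installed; the HTML is illustrative):
#
#   fe = MarkupLMFeatureExtractor()
#   enc = fe("<html><body><p>Hello</p></body></html>")
#   print(enc["nodes"])   # [['Hello']]
#   print(enc["xpaths"])  # [['/html/body/p']]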
| 660 | 0 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError('''String lengths must match!''' )
lowerCamelCase_ = 0
    for char_a, char_b in zip(lowerCAmelCase__ ,lowerCAmelCase__ ):
        # count the positions at which the two strings disagree
        if char_a != char_b:
            count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
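# Quick check (hedged: the strings are illustrative; exactly three positions of
# "karolin" and "kathrin" differ):
#
#   >>> lowercase("karolin", "kathrin")
#   3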
| 29 | '''simple docstring'''
def lowerCamelCase__ ( A_ , A_ ):
_validate_point(A_ )
_validate_point(A_ )
if len(A_ ) != len(A_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(A_ , A_ ) ) )
def lowerCamelCase__ ( A_ ):
if point:
if isinstance(A_ , A_ ):
for item in point:
if not isinstance(A_ , (int, float) ):
UpperCAmelCase_ = (
"Expected a list of numbers as input, found "
F"""{type(A_ ).__name__}"""
)
raise TypeError(A_ )
else:
UpperCAmelCase_ = F"""Expected a list of numbers as input, found {type(A_ ).__name__}"""
raise TypeError(A_ )
else:
raise ValueError("Missing an input" )
def lowerCamelCase__ ( A_ , A_ ):
_validate_point(A_ )
_validate_point(A_ )
if len(A_ ) != len(A_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(A_ , A_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 0 |
import random
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase = False ):
'''simple docstring'''
UpperCAmelCase_ : dict = {i: [] for i in range(_lowercase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(_lowercase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes, add an edge from i to j
    # if the randomly generated number is less than the given probability
for i in range(_lowercase ):
for j in range(i + 1 , _lowercase ):
if random.random() < probability:
graph[i].append(_lowercase )
if not directed:
                # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(_lowercase )
return graph
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
return {
i: [j for j in range(_lowercase ) if i != j] for i in range(_lowercase )
}
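# Usage sketch (hedged: in the un-obfuscated source the two functions above are
# random_graph and complete_graph; the random output is illustrative, while
# complete_graph is deterministic):
#
#   random.seed(1)
#   graph = random_graph(4, 0.5)   # e.g. {0: [1], 1: [0, 3], 2: [3], 3: [1, 2]}
#   dense = complete_graph(3)      # {0: [1, 2], 1: [0, 2], 2: [0, 1]}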
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 30 | '''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__snake_case : Optional[int] = logging.getLogger(__name__)
def lowerCamelCase__ ( A_ , A_ ):
# save results
if os.path.exists(A_ ):
if os.path.exists(os.path.join(A_ , "config.json" ) ) and os.path.isfile(
os.path.join(A_ , "config.json" ) ):
os.remove(os.path.join(A_ , "config.json" ) )
if os.path.exists(os.path.join(A_ , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(A_ , "pytorch_model.bin" ) ):
os.remove(os.path.join(A_ , "pytorch_model.bin" ) )
else:
os.makedirs(A_ )
model.save_pretrained(A_ )
def lowerCamelCase__ ( A_ , A_=False ):
UpperCAmelCase_ = 2
if unlogit:
UpperCAmelCase_ = torch.pow(A_ , A_ )
UpperCAmelCase_ = p * torch.log(A_ )
UpperCAmelCase_ = 0
return -plogp.sum(dim=-1 )
def lowerCamelCase__ ( A_ ):
logger.info("lv, h >\t" + "\t".join(F"""{x + 1}""" for x in range(len(A_ ) ) ) )
for row in range(len(A_ ) ):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def lowerCamelCase__ ( A_ , A_ , A_ , A_=True , A_=True , A_=None , A_=False ):
UpperCAmelCase_ , UpperCAmelCase_ = model.config.num_hidden_layers, model.config.num_attention_heads
UpperCAmelCase_ = torch.zeros(A_ , A_ ).to(args.device )
UpperCAmelCase_ = torch.zeros(A_ , A_ ).to(args.device )
if head_mask is None:
UpperCAmelCase_ = torch.ones(A_ , A_ ).to(args.device )
head_mask.requires_grad_(requires_grad=A_ )
    # If the attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
UpperCAmelCase_ = None
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = 0.0
for step, inputs in enumerate(tqdm(A_ , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
UpperCAmelCase_ = tuple(t.to(args.device ) for t in inputs )
((UpperCAmelCase_) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
UpperCAmelCase_ = model(A_ , labels=A_ , head_mask=A_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(A_ ):
UpperCAmelCase_ = entropy(attn.detach() , A_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(A_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
UpperCAmelCase_ = 2
UpperCAmelCase_ = torch.pow(torch.pow(A_ , A_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
UpperCAmelCase_ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(A_ )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(A_ )
logger.info("Head ranked by importance scores" )
UpperCAmelCase_ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
UpperCAmelCase_ = torch.arange(
head_importance.numel() , device=args.device )
UpperCAmelCase_ = head_ranks.view_as(A_ )
print_ad_tensor(A_ )
return attn_entropy, head_importance, total_loss
def lowerCamelCase__ ( A_ , A_ , A_ ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(A_ , A_ , A_ , compute_entropy=A_ )
    UpperCAmelCase_ = 1 / loss # instead of a downstream score, use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , A_ , original_score * args.masking_threshold )
UpperCAmelCase_ = torch.ones_like(A_ )
UpperCAmelCase_ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
UpperCAmelCase_ = original_score
while current_score >= original_score * args.masking_threshold:
UpperCAmelCase_ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
UpperCAmelCase_ = float("Inf" )
UpperCAmelCase_ = head_importance.view(-1 ).sort()[1]
if len(A_ ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
UpperCAmelCase_ = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
UpperCAmelCase_ = new_head_mask.view(-1 )
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = new_head_mask.view_as(A_ )
UpperCAmelCase_ = new_head_mask.clone().detach()
print_ad_tensor(A_ )
# Compute metric and head importance again
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , head_mask=A_ )
UpperCAmelCase_ = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , A_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(A_ )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCamelCase__ ( A_ , A_ , A_ , A_ ):
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(A_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(A_ , A_ ):
UpperCAmelCase_ = [
v,
]
assert sum(len(A_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(A_ )
UpperCAmelCase_ = sum(p.numel() for p in model.parameters() )
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = compute_heads_importance(
A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ , actually_pruned=A_ , )
UpperCAmelCase_ = 1 / loss
UpperCAmelCase_ = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , A_ , A_ , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , A_ , A_ )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(A_ , args.output_dir )
def lowerCamelCase__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=A_ , type=A_ , required=A_ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=A_ , type=A_ , required=A_ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=A_ , type=A_ , required=A_ , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=A_ , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=A_ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=A_ , type=A_ , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=A_ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=A_ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=A_ , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=A_ , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=A_ , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=A_ , help="Batch size." )
parser.add_argument("--seed" , type=A_ , default=42 )
parser.add_argument("--local_rank" , type=A_ , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=A_ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=A_ , default="" , help="Can be used for distant debugging." )
UpperCAmelCase_ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
UpperCAmelCase_ = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
UpperCAmelCase_ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
UpperCAmelCase_ = torch.device("cuda" , args.local_rank )
UpperCAmelCase_ = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
UpperCAmelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
UpperCAmelCase_ = nn.parallel.DistributedDataParallel(
A_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=A_ )
elif args.n_gpu > 1:
UpperCAmelCase_ = nn.DataParallel(A_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=A_ )
torch.save(A_ , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , A_ )
# Prepare dataset
UpperCAmelCase_ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
UpperCAmelCase_ = (torch.from_numpy(A_ ),)
UpperCAmelCase_ = TensorDataset(*A_ )
UpperCAmelCase_ = RandomSampler(A_ )
UpperCAmelCase_ = DataLoader(A_ , sampler=A_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(A_ , A_ , A_ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCAmelCase_ = mask_heads(A_ , A_ , A_ )
prune_heads(A_ , A_ , A_ , A_ )
if __name__ == "__main__":
main()
| 660 | 0 |
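The snippet above ends with head-importance scoring followed by optional masking and pruning. As a minimal, hedged sketch of the underlying transformers head-pruning API (the checkpoint name and head indices here are illustrative, not taken from the script):

from transformers import GPT2LMHeadModel

model = GPT2LMHeadModel.from_pretrained("gpt2")
# prune_heads takes {layer_index: [head_indices]} and rewires the attention
# projections so the pruned heads are removed from the computation entirely.
model.prune_heads({0: [0, 1], 5: [2]})
print(model.config.pruned_heads)  # records which heads were removed per layer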
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
'''simple docstring'''
def __init__( self : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str=2 , _lowerCAmelCase : str=True , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : Tuple=10 , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : Union[str, Any]=32 * 8 , _lowerCAmelCase : Dict=32 * 8 , _lowerCAmelCase : Optional[Any]=4 , _lowerCAmelCase : List[str]=64 , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ = num_queries
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = min_size
SCREAMING_SNAKE_CASE_ = max_size
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = hidden_dim
SCREAMING_SNAKE_CASE_ = hidden_dim
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCAmelCase ) > 0.5
).float()
SCREAMING_SNAKE_CASE_ = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCAmelCase ) > 0.5).long()
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = Mask2FormerConfig(
hidden_size=self.hidden_dim , )
SCREAMING_SNAKE_CASE_ = self.num_queries
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = [1, 1, 1, 1]
SCREAMING_SNAKE_CASE_ = self.num_channels
SCREAMING_SNAKE_CASE_ = 64
SCREAMING_SNAKE_CASE_ = 128
SCREAMING_SNAKE_CASE_ = self.hidden_dim
SCREAMING_SNAKE_CASE_ = self.hidden_dim
SCREAMING_SNAKE_CASE_ = self.hidden_dim
return config
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = output.encoder_hidden_states
SCREAMING_SNAKE_CASE_ = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCAmelCase ) , config.decoder_layers )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple=False ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = Mask2FormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = Mask2FormerForUniversalSegmentation(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
def comm_check_on_output(_lowerCAmelCase : Any ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
comm_check_on_output(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(
pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
comm_check_on_output(_lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = Mask2FormerModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_mask2former_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*_lowerCAmelCase )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowerCAmelCase_ ( self : int ):
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowerCAmelCase_ ( self : int ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase_ ( self : List[Any] ):
pass
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
SCREAMING_SNAKE_CASE_ = Mask2FormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE_ = {
'pixel_values': torch.randn((2, 3, *size) , device=_lowerCAmelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=_lowerCAmelCase ),
'class_labels': torch.zeros(2 , 10 , device=_lowerCAmelCase ).long(),
}
SCREAMING_SNAKE_CASE_ = self.model_tester.get_config()
SCREAMING_SNAKE_CASE_ = Mask2FormerForUniversalSegmentation(_lowerCAmelCase ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_mask2former_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase_ ( self : Tuple ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase ).loss
loss.backward()
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1E-4


def prepare_img():
SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : Any ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase_ ( self : int ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = Mask2FormerModel.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase ).eval()
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
# masks_queries_logits
SCREAMING_SNAKE_CASE_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
SCREAMING_SNAKE_CASE_ = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
SCREAMING_SNAKE_CASE_ = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase ).eval()
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = [el.to(_lowerCAmelCase ) for el in inputs['mask_labels']]
SCREAMING_SNAKE_CASE_ = [el.to(_lowerCAmelCase ) for el in inputs['class_labels']]
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None ) | 31 | '''simple docstring'''
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        # the special tokens are added by hand, so encode() must not add them again
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
| 660 | 0 |
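To read the binarized dump back, a minimal sketch (the file name follows the script's --dump_file and --tokenizer_name defaults above; adjust it to whatever was actually written):

import pickle

import numpy as np

with open("data/dump.bert-base-uncased.pickle", "rb") as f:
    sequences = pickle.load(f)  # one uint16/int32 id array per input line

lengths = [len(seq) for seq in sequences]
print(f"{len(sequences)} sequences, mean length {np.mean(lengths):.1f} tokens")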
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
UpperCAmelCase_ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = ZeroShotClassificationPipeline(
model=_UpperCamelCase , tokenizer=_UpperCamelCase , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(_UpperCamelCase , {'''sequence''': ANY(_UpperCamelCase ), '''labels''': [ANY(_UpperCamelCase )], '''scores''': [ANY(_UpperCamelCase )]} )
# No kwarg
_UpperCAmelCase = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(_UpperCamelCase , {'''sequence''': ANY(_UpperCamelCase ), '''labels''': [ANY(_UpperCamelCase )], '''scores''': [ANY(_UpperCamelCase )]} )
_UpperCAmelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(_UpperCamelCase , {'''sequence''': ANY(_UpperCamelCase ), '''labels''': [ANY(_UpperCamelCase )], '''scores''': [ANY(_UpperCamelCase )]} )
_UpperCAmelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
_UpperCamelCase , {'''sequence''': ANY(_UpperCamelCase ), '''labels''': [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], '''scores''': [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
_UpperCAmelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
_UpperCamelCase , {'''sequence''': ANY(_UpperCamelCase ), '''labels''': [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], '''scores''': [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
_UpperCAmelCase = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(_UpperCamelCase , {'''sequence''': ANY(_UpperCamelCase ), '''labels''': [ANY(_UpperCamelCase )], '''scores''': [ANY(_UpperCamelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
_UpperCAmelCase = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_UpperCamelCase , [
{'''sequence''': ANY(_UpperCamelCase ), '''labels''': [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], '''scores''': [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]}
for i in range(1 )
] , )
_UpperCAmelCase = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_UpperCamelCase , [
{'''sequence''': ANY(_UpperCamelCase ), '''labels''': [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )], '''scores''': [ANY(_UpperCamelCase ), ANY(_UpperCamelCase )]}
for i in range(2 )
] , )
with self.assertRaises(_UpperCamelCase ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(_UpperCamelCase ):
classifier(_UpperCamelCase , candidate_labels='''politics''' )
with self.assertRaises(_UpperCamelCase ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(_UpperCamelCase ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=_UpperCamelCase )
with self.assertRaises(_UpperCamelCase ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(_UpperCamelCase ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=_UpperCamelCase , )
self.run_entailment_id(_UpperCamelCase )
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
def UpperCamelCase( self ):
_UpperCAmelCase = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def UpperCamelCase( self ):
_UpperCAmelCase = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
_UpperCAmelCase = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_UpperCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def UpperCamelCase( self ):
_UpperCAmelCase = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
_UpperCAmelCase = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_UpperCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def UpperCamelCase( self ):
_UpperCAmelCase = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
_UpperCAmelCase = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_UpperCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
_UpperCAmelCase = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_UpperCamelCase , )
self.assertEqual(
nested_simplify(_UpperCamelCase ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def UpperCamelCase( self ):
_UpperCAmelCase = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
_UpperCAmelCase = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_UpperCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
_UpperCAmelCase = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_UpperCamelCase , )
self.assertEqual(
nested_simplify(_UpperCamelCase ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , ) | 32 | '''simple docstring'''
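Outside the test harness above, the same pipeline is used directly like this (a short usage sketch; the checkpoint matches the slow test, and any NLI-style model can be substituted):

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
# labels come back sorted by score, highest first
print(result["labels"][0], round(result["scores"][0], 3))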
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, '''r''', encoding='''utf-8''') as f:
bleu_data = json.load(f)
@require_torch
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Dict:
"""simple docstring"""
return FSMTTokenizer.from_pretrained(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = FSMTForConditionalGeneration.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = F"""facebook/wmt19-{pair}"""
UpperCAmelCase_ = self.get_tokenizer(UpperCamelCase__ )
UpperCAmelCase_ = self.get_model(UpperCamelCase__ )
UpperCAmelCase_ = bleu_data[pair]["src"]
UpperCAmelCase_ = bleu_data[pair]["tgt"]
UpperCAmelCase_ = tokenizer(UpperCamelCase__ , return_tensors="pt" , truncation=UpperCamelCase__ , padding="longest" ).to(UpperCamelCase__ )
UpperCAmelCase_ = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase_ = tokenizer.batch_decode(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
UpperCAmelCase_ = calculate_bleu(UpperCamelCase__ , UpperCamelCase__ )
print(UpperCamelCase__ )
self.assertGreaterEqual(scores["bleu"] , UpperCamelCase__ )
| 660 | 0 |
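The calculate_bleu helper above is imported from the examples' local utils; assuming it wraps sacrebleu the way the seq2seq examples do, an equivalent standalone check is:

import sacrebleu

predictions = ["the cat sat on the mat"]
references = ["the cat sat on the mat"]
# corpus_bleu expects one list of hypotheses and a list of reference streams
bleu = sacrebleu.corpus_bleu(predictions, [references])
print(round(bleu.score, 2))  # 100.0 for an exact match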
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 5_1_2,
"""google/realm-cc-news-pretrained-encoder""": 5_1_2,
"""google/realm-cc-news-pretrained-scorer""": 5_1_2,
"""google/realm-cc-news-pretrained-openqa""": 5_1_2,
"""google/realm-orqa-nq-openqa""": 5_1_2,
"""google/realm-orqa-nq-reader""": 5_1_2,
"""google/realm-orqa-wq-openqa""": 5_1_2,
"""google/realm-orqa-wq-reader""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
def __init__( self:Dict , _a:List[str]=None , _a:Union[str, Any]=None , _a:Dict=True , _a:List[Any]="[UNK]" , _a:Any="[SEP]" , _a:Union[str, Any]="[PAD]" , _a:int="[CLS]" , _a:Tuple="[MASK]" , _a:List[Any]=True , _a:str=None , **_a:Any , ):
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , tokenize_chinese_chars=_a , strip_accents=_a , **_a , )
snake_case__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _a ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _a ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _a ) != tokenize_chinese_chars
):
snake_case__ = getattr(_a , normalizer_state.pop('''type''' ) )
snake_case__ = do_lower_case
snake_case__ = strip_accents
snake_case__ = tokenize_chinese_chars
snake_case__ = normalizer_class(**_a )
snake_case__ = do_lower_case
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:int , **_a:int ):
snake_case__ = PaddingStrategy.MAX_LENGTH
snake_case__ = text
snake_case__ = kwargs.pop('''text_pair''' , _a )
snake_case__ = kwargs.pop('''return_tensors''' , _a )
snake_case__ = {
'''input_ids''': [],
'''attention_mask''': [],
'''token_type_ids''': [],
}
for idx, candidate_text in enumerate(_a ):
if batch_text_pair is not None:
snake_case__ = batch_text_pair[idx]
else:
snake_case__ = None
snake_case__ = super().__call__(_a , _a , return_tensors=_a , **_a )
snake_case__ = encoded_candidates.get('''input_ids''' )
snake_case__ = encoded_candidates.get('''attention_mask''' )
snake_case__ = encoded_candidates.get('''token_type_ids''' )
if encoded_input_ids is not None:
output_data["input_ids"].append(_a )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(_a )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(_a )
snake_case__ = {key: item for key, item in output_data.items() if len(_a ) != 0}
return BatchEncoding(_a , tensor_type=_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:int , _a:int=None ):
snake_case__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:List[int] , _a:Optional[List[int]] = None ):
snake_case__ = [self.sep_token_id]
snake_case__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:str , _a:Optional[str] = None ):
snake_case__ = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
| 33 | '''simple docstring'''
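A short sketch of the candidate batching that batch_encode_candidates above implements: forcing max-length padding lets every example's candidates stack into a (batch_size, num_candidates, seq_len) tensor. The checkpoint name is taken from the pretrained map above:

from transformers import RealmTokenizerFast

tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
batch = [["first candidate", "second candidate"], ["another first", "another second"]]
encoded = tokenizer.batch_encode_candidates(batch, max_length=10, return_tensors="pt")
print(encoded.input_ids.shape)  # torch.Size([2, 2, 10])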
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # target keys follow diffusers' ResnetBlock2D naming (reconstructed; the
    # obfuscation destroyed the left-hand sides of these assignments)
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    # split the fused qkv projection into separate q, k, v tensors
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    # target keys follow diffusers' Attention naming (reconstructed)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    # time embedding (diffusers-side key names reconstructed)
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 660 | 0 |
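Once a checkpoint has been converted and saved with --dump_path, the resulting pipeline can be sampled from. A minimal sketch (the path is a placeholder; for class-conditional checkpoints a class_labels argument can also be passed to the call):

import torch

from diffusers import ConsistencyModelPipeline

pipe = ConsistencyModelPipeline.from_pretrained("path/to/dump_path")
pipe.to("cuda" if torch.cuda.is_available() else "cpu")
# single-step generation is the headline feature of consistency models
image = pipe(num_inference_steps=1).images[0]
image.save("sample.png")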
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/pegasus-xsum': 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_="<pad>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<mask_2>" , lowerCamelCase_="<mask_1>" , lowerCamelCase_=None , lowerCamelCase_=1_0_3 , **lowerCamelCase_ , ) -> Optional[int]:
UpperCamelCase = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
raise TypeError(
F'additional_special_tokens should be of type {type(lowerCamelCase_)}, but is'
F' {type(lowerCamelCase_)}')
UpperCamelCase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'<unk_{i}>' for i in range(len(lowerCamelCase_) , self.offset - 1)
]
if len(set(lowerCamelCase_)) != len(lowerCamelCase_):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
UpperCamelCase = additional_special_tokens_extended
else:
UpperCamelCase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'<unk_{i}>' for i in range(2 , self.offset)]
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , pad_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , mask_token_sent=lowerCamelCase_ , offset=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[Any]:
UpperCamelCase = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
F' {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}')
return [1 if x in all_special_ids else 0 for x in seq]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase_)
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase_) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a) + [1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,) | 34 | '''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()

DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
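# Hedged usage sketch (added; message names follow the .proto above, the module
# and file names are assumptions): once the messages are built, a serialized
# SentencePiece model can be inspected like so:
#
#     from sentencepiece_model_pb2 import ModelProto
#     m = ModelProto()
#     with open("spiece.model", "rb") as f:
#         m.ParseFromString(f.read())
#     print(m.trainer_spec.model_type, m.trainer_spec.vocab_size)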
| 660 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
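# Hedged usage sketch (added): thanks to the guarded imports above, callers can
# import the pipeline unconditionally; missing torch/transformers only surfaces
# as a helpful error when the dummy object is used. The checkpoint name below
# is an assumption for illustration.
#
#     from diffusers import UnCLIPPipeline
#     pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
#     image = pipe("a photo of a corgi").images[0]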
| 35 | '''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
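    # Note (added): this is a slow integration test; a sketch of how such tests
    # are usually selected (the test-file path is an assumption):
    #     RUN_SLOW=1 pytest tests/models/xlm_roberta -k "flax"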
| 660 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
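    # Minimal sketch (added) of the replicate/shard data-parallel pattern used
    # above; shapes are illustrative, not taken from the original test:
    #
    #     n = jax.device_count()
    #     batch = jnp.zeros((n * 2, 8))               # leading dim divisible by n
    #     sharded = shard(batch)                      # -> (n, 2, 8), one slice per device
    #     replicated = replicate({"w": jnp.ones(8)})  # each leaf gains a leading device axis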
| 36 | '''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
__snake_case : Dict = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
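    # Example invocation (added; script name and paths are hypothetical):
    #     python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
    #         --gpt2_checkpoint_path /tmp/gpt2/model.ckpt \
    #         --pytorch_dump_folder_path /tmp/gpt2-pytorch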
| 660 | 0 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy
                # transforms it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms
                # it into a space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
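    # Note (added): `return_offsets_mapping=True` is only supported by fast (Rust)
    # tokenizers, which is why the offset tests build `tokenizer_r` from
    # self.rust_tokenizer_class. A sketch of the asserted behavior:
    #     enc = tokenizer_r("hello hello", return_offsets_mapping=True, add_special_tokens=False)
    #     enc.offset_mapping  # -> [(0, 5), (6, 11)] (illustrative values)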
| 37 | '''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists as a handler, otherwise returns None"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
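# Hypothetical usage sketch (added, not part of the original module): after
# `register`, a class gains `handle_input` plus a `key_handler` table mapping
# every code passed to `mark`/`mark_multiple` to the decorated function.
@register
class _DemoMenu:
    @mark("x")  # "x" stands in for a KEYMAP code; real callers pass KEYMAP[...]
    def select_x(cls):
        return "x selected"


assert _DemoMenu.key_handler["x"].__name__ == "select_x"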
| 660 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
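    # Sketch (added) of the trim_offsets semantics asserted above: for
    # text = "hello hello" with add_prefix_space=True, trim_offsets=False keeps
    # the leading space inside the second token's span,
    #     offset_mapping[1] == (5, 11)
    # while trim_offsets=True trims it to (6, 11).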
| 38 | '''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)


@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim]
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim]
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()


def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
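    # Sketch (added): `generate` returns num_parallel_samples trajectories per
    # series; the point forecast asserted above is the mean over that axis:
    #     mean_prediction = outputs.sequences.mean(dim=1)  # (batch, prediction_length)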
| 660 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy
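    # Hedged usage sketch (added; the constructor kwargs are assumptions):
    #     args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], eager_mode=False)
    #     with args.strategy.scope():
    #         ...  # build and run the benchmarked model under the chosen strategy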
    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0 | 39 | '''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and emoji file into a dictionary."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
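# Sketch (added) of the vocab.txt format consumed by load_vocab_and_emoji, as
# implied by the parser above: one id per line; a line may carry several
# comma-separated surface forms that all map to that id, and a line that is
# just "," denotes the comma token itself, e.g. (hypothetical entries):
#
#     こんにちは,コンニチハ
#     ,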
class lowercase_ ( _A ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                F"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.raw_vocab )
    def get_vocab( self ) -> Dict:
        """simple docstring"""
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ) -> Union[str, Any]:
        """simple docstring"""
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def _convert_token_to_id( self , token ) -> int:
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) -> List[Any]:
        """simple docstring"""
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        out_string = "".join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(",".join(token ) + "\n" )
                index += 1
        with open(emoji_file , "w" , encoding="utf-8" ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
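# Word-piece style tokenizer used by GPT-NeoX Japanese: it normalizes raw text,
# greedily matches the longest vocabulary entry at each position, and falls back
# to symbol placeholders or raw byte tokens for out-of-vocabulary characters.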
class SubWordJapaneseTokenizer(object ):
    def __init__( self , vocab , ids_to_tokens , emoji ) -> None:
        """simple docstring"""
        self.vocab = vocab # same as swe
        self.ids_to_tokens = ids_to_tokens # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
        self.content_repatter2 = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
        self.content_repatter3 = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
        self.content_repatter4 = re.compile(
            R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter5 = re.compile(
            R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter6 = re.compile(
            R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ) -> int:
"""simple docstring"""
return len(self.ids_to_tokens )
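    # Replaces URLs, e-mail addresses, phone numbers, dates and prices with
    # placeholder tokens, then collapses runs of box-drawing/block characters into <BLOCK>.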
    def clean_text( self , content ) -> str:
        """simple docstring"""
        content = self.content_repatter1.sub("<URL>" , content )
        content = self.content_repatter2.sub("<EMAIL>" , content )
        content = self.content_repatter3.sub("<TEL>" , content )
        content = self.content_repatter4.sub("<DATE>" , content )
        content = self.content_repatter5.sub("<DATE>" , content )
        content = self.content_repatter6.sub("<PRICE>" , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
        return content
    def tokenize( self , text , clean=False ) -> List[Any]:
        """simple docstring"""
        text = text.replace(" " , "<SP>" )
        text = text.replace("　" , "<SP>" )
        text = text.replace("\r\n" , "<BR>" )
        text = text.replace("\n" , "<BR>" )
        text = text.replace("\r" , "<BR>" )
        text = text.replace("\t" , "<TAB>" )
        text = text.replace("—" , "ー" )
        text = text.replace("−" , "ー" )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_symbol(x ):
            # True when x is a single character whose UTF-8 encoding is two bytes
            # inside the symbol ranges handled by the <KIGOU> placeholder.
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0Xc_2_a_1 and c <= 0Xc_2_b_f)
                    or (c >= 0Xc_7_8_0 and c <= 0Xc_7_8_3)
                    or (c >= 0Xc_a_b_9 and c <= 0Xc_b_b_f)
                    or (c >= 0Xc_c_8_0 and c <= 0Xc_d_a_2)
                ):
                    return True
            return False
        def checkuae(x ):
            # True when x is a single character whose three-byte UTF-8 encoding falls
            # in the U+2000..U+2BFF range, replaced by the <U2000U2BFF> placeholder.
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0Xe_2_8_0_8_0 and c <= 0Xe_2_b_0_7_f:
                    return True
            return False
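        # Greedy matching: at each position try the longest candidate first; among
        # all vocabulary hits the one with the smallest token id is adopted. Unknown
        # characters degrade to <KIGOU>, <U2000U2BFF> or raw byte tokens.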
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
            candidates = [] # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_symbol(wd ):
                    result.append("<KIGOU>" )
                elif checkuae(wd ):
                    result.append("<U2000U2BFF>" )
                else:
                    for i in wd.encode("utf-8" ):
                        result.append("<|byte%d|>" % i )
                pos = end
return result
    def convert_id_to_token( self , index , breakline="\n" ) -> Optional[Any]:
        """simple docstring"""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode("utf-8" , errors="replace" ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word] )
            elif word == "<SP>":
                words.append(" " )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append("\t" )
            elif word == "<BLOCK>":
                words.append("▀" )
            elif word == "<KIGOU>":
                words.append("ǀ" )
            elif word == "<U2000U2BFF>":
                words.append("‖" )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode("utf-8" , errors="replace" ) )
        text = "".join(words )
        return text
| 660 | 0 |
from math import pi
def arc_length( angle : int , radius : int ) -> float:
    return 2 * pi * radius * (angle / 360)
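# Example: a 90-degree arc of a circle with radius 10 has length 2*pi*10*(90/360) ~= 15.708.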
if __name__ == "__main__":
print(arc_length(90, 10))
| 40 | '''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
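# Policy implemented below: close issues inactive for more than 7 days after the
# stale notice, un-stale issues a human has commented on, and post a stale notice
# after 23 days of inactivity (issues with the labels above are exempt).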
def main():
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/diffusers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 660 | 0 |
'''simple docstring'''
import operator as op
lowerCAmelCase__ = '''scaler.pt'''
lowerCAmelCase__ = '''pytorch_model'''
lowerCAmelCase__ = '''random_states'''
lowerCAmelCase__ = '''optimizer'''
lowerCAmelCase__ = '''scheduler'''
lowerCAmelCase__ = '''pytorch_model.bin'''
lowerCAmelCase__ = '''pytorch_model.bin.index.json'''
lowerCAmelCase__ = '''model.safetensors'''
lowerCAmelCase__ = '''model.safetensors.index.json'''
lowerCAmelCase__ = '''1.10.2'''
lowerCAmelCase__ = '''py38'''
lowerCAmelCase__ = '''4.17.0'''
lowerCAmelCase__ = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
lowerCAmelCase__ = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
lowerCAmelCase__ = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
lowerCAmelCase__ = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
lowerCAmelCase__ = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
lowerCAmelCase__ = '''2.0.1'''
lowerCAmelCase__ = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
lowerCAmelCase__ = ['''default''', '''reduce-overhead''', '''max-autotune''']
lowerCAmelCase__ = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowerCAmelCase__ = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
lowerCAmelCase__ = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
lowerCAmelCase__ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 41 | '''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig ):
    batch_size: int = 1_0000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info( self ) -> datasets.DatasetInfo:
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ) -> List[str]:
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , "rb" ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
return splits
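    # Casts each Arrow table to the declared features so column order and nested
    # key order do not have to match the on-disk parquet schema.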
    def _cast_table( self , pa_table ) -> pa.Table:
        """simple docstring"""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ) -> Optional[Any]:
        """simple docstring"""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , "rb" ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F"""{file_idx}_{batch_idx}""", self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(F"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                    raise
| 660 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ) -> None:
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self , **kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> Optional[Any]:
        '''simple docstring'''
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text
    def test_full_tokenizer( self ) -> None:
        '''simple docstring'''
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 42 | '''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
        '''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
        '''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
        '''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
        '''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''AI-Sweden/gpt-sw3-126m''': 20_48,
    '''AI-Sweden/gpt-sw3-350m''': 20_48,
    '''AI-Sweden/gpt-sw3-1.6b''': 20_48,
    '''AI-Sweden/gpt-sw3-6.7b''': 20_48,
    '''AI-Sweden/gpt-sw3-20b''': 20_48,
}
class GPTSw3Tokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs = None , **kwargs , ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("name_or_path" )
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " if you are testing the model, this can safely be ignored" )
            name_or_path = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F"""[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" )
    def __getstate__( self ) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.sp_model )
    def preprocess_text( self , text ) -> str:
        """simple docstring"""
        text = self.non_printing_characters_re.sub("" , text )
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC" , text )
        return text
    def _tokenize( self , text , **kwargs ) -> List[str]:
        """simple docstring"""
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        """simple docstring"""
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ) -> str:
        """simple docstring"""
        return self.sp_model.IdToPiece(index )
@staticmethod
    def clean_up_tokenization( out_string ) -> str:
        """simple docstring"""
        return out_string
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def get_vocab( self ) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def encode_fast( self , text , return_tensors = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """simple docstring"""
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids
    def decode_fast( self , token_ids ) -> str:
        """simple docstring"""
        return self.sp_model.decode(token_ids )
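    # The conversation prompt built below joins the turns with bos tokens and cues
    # the model to answer by ending the prompt with "Bot:".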
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        """simple docstring"""
        all_responses = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        prompt = (
            F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(all_responses ) + F"""{self.bos_token}Bot:"""
        )
        return self.encode(text=prompt )
| 660 | 0 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
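# Seq2seq trainer variant for question answering: generated predictions are run
# through `post_process_function` before metrics are computed.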
class QuestionAnsweringSeqaSeqTrainer(SeqaSeqTrainer ):
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ) -> None:
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset: Optional[Dataset] = None , eval_examples=None , ignore_keys: Optional[List[str]] = None , metric_key_prefix: str = "eval" , **gen_kwargs , ) -> Dict[str, float]:
        """simple docstring"""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs['''max_length'''] = (
            gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
        )
        gen_kwargs['''num_beams'''] = (
            gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f'{metric_key_prefix}_' ):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix: str = "test" , **gen_kwargs ) -> Union[str, Any]:
        """simple docstring"""
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output , '''predict''' )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f'{metric_key_prefix}_' ):
                metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
| 43 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> None:
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 1_8}
        crop_size = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ) -> Union[str, Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase_ ( _A , unittest.TestCase ):
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ) -> None:
        """simple docstring"""
        self.image_processor_tester = LevitImageProcessingTester(self )
@property
    def image_processor_dict( self ) -> Dict:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 1_8} )
        self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
        self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
        self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
    def test_batch_feature( self ) -> None:
        """simple docstring"""
        pass
    def test_call_pil( self ) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def test_call_numpy( self ) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def test_call_pytorch( self ) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 660 | 0 |
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(f'''Downloading image from {url} ...''')
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(f'''Done. Image saved to disk as {file_name}.''') | 44 | '''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
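# Approximates the arc length of fnc on [x_start, x_end] by summing the lengths
# of `steps` straight-line segments (a polygonal chain along the curve).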
def line_length( fnc: Callable[[float], float] , x_start: float , x_end: float , steps: int = 100 , ) -> float:
    xa = x_start
    fxa = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb )
        length += math.hypot(xb - xa , fxb - fxa )
        # Increment step
        xa = xb
        fxa = fxb
    return length
if __name__ == "__main__":
    def f( x ):
        return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
    i = 10
while i <= 10_00_00:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 660 | 0 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
UpperCamelCase = "Usage of script: script_name <size_of_canvas:int>"
UpperCamelCase = [0] * 100 + [1] * 10
random.shuffle(choice)
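# `choice` keeps the original script's seeding distribution (about 9% alive cells);
# note that `seed` below randomizes each cell independently with getrandbits instead.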
def create_canvas(size: int ) -> list[list[bool]]:
    canvas = [[False for i in range(size )] for j in range(size )]
    return canvas
def seed(canvas: list[list[bool]] ) -> None:
    for i, row in enumerate(canvas ):
        for j, _ in enumerate(row ):
            canvas[i][j] = bool(random.getrandbits(1 ) )
def run(canvas: list[list[bool]] ) -> list[list[bool]]:
    current_canvas = np.array(canvas )
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(current_canvas ):
        for c, pt in enumerate(row ):
            next_gen_canvas[r][c] = __judge_point(
                pt , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
    current_canvas = next_gen_canvas
    del next_gen_canvas # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool , neighbours: list[list[bool]] ) -> bool:
    alive = 0
    dead = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass | 45 | '''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
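# Read-only fsspec filesystem over a Hugging Face Hub dataset repository: listings
# come from `repo_info.siblings` and opens resolve to `hf_hub_url` downloads.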
class HfFileSystem(AbstractFileSystem ):
    root_marker = """"""
    protocol = """hf-legacy""" # "hf://"" is reserved for hffs
    def __init__( self , repo_info = None , token = None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs( self ) -> None:
        """simple docstring"""
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d ): {"name": str(d ), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
    def _open( self , path , mode = "rb" , **kwargs , ) -> Optional[int]:
        """simple docstring"""
        if not isinstance(self.repo_info , DatasetInfo ):
            raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
        url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
        return fsspec.open(
            url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
    def info( self , path , **kwargs ) -> Optional[int]:
        """simple docstring"""
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self , path , detail=False , **kwargs ) -> str:
        """simple docstring"""
        self._get_dirs()
        path = PurePosixPath(path.strip("/" ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/" ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out )
| 660 | 0 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
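# Project Euler 59: the ciphertext was XOR-encrypted with a repeating three-letter
# lowercase key; brute-force all 26**3 keys and keep decryptions that look like English.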
def try_key(ciphertext: list[int] , key: tuple[int, ...] ) -> str | None:
    '''simple docstring'''
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars(ciphertext: list[int] ) -> list[str]:
    '''simple docstring'''
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word(possibles: list[str] , common_word: str ) -> list[str]:
    '''simple docstring'''
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt" ) -> int:
    '''simple docstring'''
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__ ).parent.joinpath(filename ).read_text(encoding="utf-8" )
    ciphertext = [int(number ) for number in data.strip().split("," )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''') | 46 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_plbart'''] = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_plbart'''] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 660 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_vivit'''] = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vivit'''] = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 47 | '''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
class SchedulerType(Enum ):
    LINEAR = """linear"""
    COSINE = """cosine"""
    COSINE_WITH_RESTARTS = """cosine_with_restarts"""
    POLYNOMIAL = """polynomial"""
    CONSTANT = """constant"""
    CONSTANT_WITH_WARMUP = """constant_with_warmup"""
    PIECEWISE_CONSTANT = """piecewise_constant"""
def get_constant_schedule( optimizer , last_epoch = -1 ):
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup( optimizer , num_warmup_steps , last_epoch = -1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
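# `step_rules` is a comma-separated rule string whose last element is the final
# multiplier, for example "1:10,2:1,0.1": use a multiplier of 10 before step 1,
# 1 before step 2, and 0.1 for every later step.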
def get_piecewise_constant_schedule( optimizer , step_rules , last_epoch = -1 ):
    rules_dict = {}
    rule_list = step_rules.split("," )
    for rule_str in rule_list[:-1]:
        value_str , value = rule_str.split(":" )
        steps = int(value_str )
        value = float(value )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(F"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler( name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
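# Minimal usage sketch (assumes an existing torch optimizer named `optimizer`):
#   scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000)
#   then call scheduler.step() once per optimization step after optimizer.step().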
| 660 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 660 | 0 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
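# Worked check for the functions above, using the classic amicable pair
# (220, 284): each number's proper-divisor sum is the other, while a perfect
# number such as 6 is excluded by the `sum_of_divisors(i) != i` clause.
#
#     assert sum_of_divisors(220) == 284
#     assert sum_of_divisors(284) == 220
#     assert solution(300) == 220 + 284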
| 49 | '''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(F"""getting tweets before {oldest}""")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F"""...{len(alltweets)} tweets downloaded so far""")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"""new_{screen_name}_tweets.csv""", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
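# Hedged alternative sketch: tweepy's v1.1 API also ships a paginator, so the
# manual `max_id` bookkeeping in get_all_tweets can be delegated to
# tweepy.Cursor (`api` is assumed to be the authenticated tweepy.API object
# built above):
#
#     for tweet in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items():
#         print(tweet.id_str, tweet.text)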
| 660 | 0 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
'''simple docstring'''
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 50 | '''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
__snake_case : int = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
def __init__( self , **UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
requires_backends(self , ["bs4"] )
super().__init__(**UpperCamelCase__ )
    def xpath_soup(self, element):
        """Build the tag/subscript path from the document root down to `element`."""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += F"""/{tagname}"""
            if subs != 0:
                xpath += F"""[{subs}]"""
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                F"""but is of type {type(html_strings)}."""
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
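# Hedged usage sketch (the expected output follows from xpath_soup and
# construct_xpath above; subscripts are omitted for tags that are unique
# among their siblings):
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     html_string = "<html><body><h1>Title</h1><p>Hello world</p></body></html>"
#     encoding = feature_extractor(html_string)
#     print(encoding["nodes"])   # [['Title', 'Hello world']]
#     print(encoding["xpaths"])  # [['/html/body/h1', '/html/body/p']]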
| 660 | 0 |
'''simple docstring'''
def count_inversions_bf(arr: list) -> int:
    """Count inversions with the O(n^2) brute-force double loop."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr: list) -> tuple:
    """Count inversions in O(n log n) by piggybacking on merge sort."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p: list, q: list) -> tuple:
    """Merge two sorted lists, counting pairs (x in p, y in q) with x > y."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main() -> None:
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
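# Quick sanity check outside main(): [3, 1, 2] contains exactly the two
# inversions (3, 1) and (3, 2), and the recursive variant also returns the
# sorted list it produced along the way.
#
#     assert count_inversions_bf([3, 1, 2]) == 2
#     assert count_inversions_recursive([3, 1, 2]) == ([1, 2, 3], 2)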
| 51 | '''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        F"""{type(item).__name__}"""
                    )
                    raise TypeError(msg)
        else:
            msg = F"""Expected a list of numbers as input, found {type(point).__name__}"""
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
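# Hedged usage sketch (the values follow directly from the definition:
# |1 - 9| + |1 - 4| = 11):
#
#     assert manhattan_distance([1, 1], [9, 4]) == 11.0
#     assert manhattan_distance_one_liner([1, 1], [9, 4]) == 11.0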
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
__snake_case : Optional[int] = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)

    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution (optionally squaring unnormalized scores first)."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2D tensor row by row (layers x heads)."""
    logger.info("lv, h >\t" + "\t".join(F"""{x + 1}""" for x in range(len(tensor))))
    for row in range(len(tensor)):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + "\t".join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, lm_logits, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)

    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)

    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
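# Background for the gradient trick above (Michel et al., 2019,
# "Are Sixteen Heads Really Better than One?", https://arxiv.org/abs/1905.10650):
# with a mask variable xi_h multiplying each head's output, the importance of
# head h is estimated as I_h = E_x |dL(x)/dxi_h|, which is exactly what
# accumulating `head_mask.grad.abs()` over the evaluation batches computes.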
def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least important heads until the score drops below the threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked head weights and measure the speed/score effect."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)

    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3", )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances." )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory" )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers" )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1", )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy." )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).", )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step." )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking." )
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ), )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size." )
    parser.add_argument("--seed", type=int, default=42 )
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus" )
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available" )
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging." )
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging." )
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
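# Hedged invocation sketch (the script filename and file paths are
# placeholders; the flags are the ones defined by the parser above):
#
#     python run_prune_gpt.py \
#         --model_name_or_path gpt2 \
#         --data_dir tokens.txt \
#         --output_dir ./pruned_gpt2 \
#         --try_masking \
#         --masking_threshold 0.9 \
#         --masking_amount 0.1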
| 660 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # attribute name assumed; the original flag name was lost in obfuscation
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"""/dit/{word}_512.npy""" )
            assert np.abs((expected_image - image).max()) < 1e-1
| 53 | '''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case : str = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data." )
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"] )
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use." )
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix." )
    args = parser.parse_args()

    logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(F"""Loading text from {args.file_path}""" )
    with open(args.file_path, "r", encoding="utf8" ) as fp:
        data = fp.readlines()

    logger.info("Start encoding" )
    logger.info(F"""{len(data )} examples to process.""" )

    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = F"""{bos} {text.strip()} {sep}"""
        token_ids = tokenizer.encode(text, add_special_tokens=False )
        rslt.append(token_ids )

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
            start = time.time()
    logger.info("Finished binarization" )
    logger.info(F"""{len(data )} examples processed.""" )

    dp_file = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F"""Dump to {dp_file}""" )
    with open(dp_file, "wb" ) as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
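# The dtype switch above halves the size of the pickled dump whenever the
# vocabulary fits in 16 bits: token ids are stored as np.uint16 when
# vocab_size < 2**16 (65,536) and fall back to np.int32 otherwise.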
| 660 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__lowercase : str =[
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
__lowercase : Any =[
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
__lowercase : Tuple =(
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
__lowercase : Dict =(
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
__lowercase : Tuple =[
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k, patterns):
    '''simple docstring'''
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    '''simple docstring'''
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}

    for k, v in tqdm(decoder_weights.items() , "tf -> hf conversion" ):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'

    for k, v in tqdm(remaining_weights.items() , "tf -> hf conversion" ):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight" )
    missing, extra = torch_model.load_state_dict(mapping, strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], F'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    '''simple docstring'''
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict" ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    '''simple docstring'''
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
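# Why `v = v.T` in convert_bigbird_pegasus: TF stores dense kernels as
# (in_features, out_features) while torch.nn.Linear.weight is laid out as
# (out_features, in_features), so every dense/query/key/value kernel has to be
# transposed before torch.from_numpy copies it into the state dict.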
| 54 | '''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__snake_case : Dict = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__snake_case : str = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ] )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = F"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest" ).to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 660 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
'''simple docstring'''
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, )
        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)
    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)
    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode(self):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." )
    def test_keras_fit(self):
pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
@unittest.skip("Skip test until #12651 is resolved." )
@slow
    def test_lm_generate_transfo_xl_wt103(self):
__A = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
__A = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
__A = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
__A = model.generate(A ,max_length=2_00 ,do_sample=A )
self.assertListEqual(output_ids[0].numpy().tolist() ,A )
| 55 | '''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def strabool(v):
    # argparse helper: accept common truthy/falsy strings for --class_cond
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # target key names follow diffusers' ResnetBlock2D parameter layout
    new_checkpoint[f"""{new_prefix}.norm1.weight"""] = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
    new_checkpoint[f"""{new_prefix}.norm1.bias"""] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
    new_checkpoint[f"""{new_prefix}.conv1.weight"""] = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
    new_checkpoint[f"""{new_prefix}.conv1.bias"""] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
    new_checkpoint[f"""{new_prefix}.time_emb_proj.weight"""] = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
    new_checkpoint[f"""{new_prefix}.time_emb_proj.bias"""] = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
    new_checkpoint[f"""{new_prefix}.norm2.weight"""] = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
    new_checkpoint[f"""{new_prefix}.norm2.bias"""] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
    new_checkpoint[f"""{new_prefix}.conv2.weight"""] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
    new_checkpoint[f"""{new_prefix}.conv2.bias"""] = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
    if has_skip:
        new_checkpoint[f"""{new_prefix}.conv_shortcut.weight"""] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
        new_checkpoint[f"""{new_prefix}.conv_shortcut.bias"""] = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3, dim=0)
    new_checkpoint[f"""{new_prefix}.group_norm.weight"""] = checkpoint[f"""{old_prefix}.norm.weight"""]
    new_checkpoint[f"""{new_prefix}.group_norm.bias"""] = checkpoint[f"""{old_prefix}.norm.bias"""]
    # 1x1 convolutions in the original checkpoint become linear layers in diffusers
    new_checkpoint[f"""{new_prefix}.to_q.weight"""] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_q.bias"""] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_k.weight"""] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_k.bias"""] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_v.weight"""] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_v.bias"""] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_out.0.weight"""] = (
        checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"""{new_prefix}.to_out.0.bias"""] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"""down_blocks.{i}.resnets.{j}"""
                old_prefix = f"""input_blocks.{current_layer}.0"""
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"""down_blocks.{i}.resnets.{j}"""
                old_prefix = f"""input_blocks.{current_layer}.0"""
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"""down_blocks.{i}.attentions.{j}"""
                old_prefix = f"""input_blocks.{current_layer}.1"""
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"""down_blocks.{i}.downsamplers.0"""
            old_prefix = f"""input_blocks.{current_layer}.0"""
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"""up_blocks.{i}.resnets.{j}"""
                old_prefix = f"""output_blocks.{current_layer}.0"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"""up_blocks.{i}.upsamplers.0"""
                old_prefix = f"""output_blocks.{current_layer-1}.1"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"""up_blocks.{i}.resnets.{j}"""
                old_prefix = f"""output_blocks.{current_layer}.0"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"""up_blocks.{i}.attentions.{j}"""
                old_prefix = f"""output_blocks.{current_layer}.1"""
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"""up_blocks.{i}.upsamplers.0"""
                old_prefix = f"""output_blocks.{current_layer-1}.2"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
    parser.add_argument(
        '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
    )
    parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(F'''Checkpoint: {ckpt_name}''')
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
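# A hedged usage sketch for this conversion script. The checkpoint filename and
# the script name are hypothetical; only the flag names come from the argparse
# definition above.
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-converted \
#       --class_cond True
#
# The saved pipeline can then be reloaded with the standard diffusers API:
#
#   from diffusers import ConsistencyModelPipeline
#   pipe = ConsistencyModelPipeline.from_pretrained("./consistency-model-converted")
#   image = pipe(num_inference_steps=1).images[0]  # one-step sampling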
| 660 | 0 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab'])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['source_spm'])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['target_spm'])
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '</s>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '<pad>')
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de')
        batch = en_de_tokenizer(['I am a small frog'], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 3_8848, 0]
        self.assertListEqual(expected, batch.input_ids[0])
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob('*')]
        self.assertIn('source.spm', contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ['I am a small frog' * 1000, 'I am a small frog'], padding=True, truncation=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(['I am a tiny frog', 'I am a small frog'], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
# fmt: off
__snake_case = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # the fixture dict above keeps its original local name to avoid retyping it
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case, model_name='Helsinki-NLP/opus-mt-en-de', revision='1a8c2263da11e68e50938f97e10cd57820bd504c', decode_kwargs={'use_source_tokenizer': True})
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs')
        source_text = 'Tämä on testi'
        target_text = 'This is a test'
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)
        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)
        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
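# A hedged, self-contained sketch of the encode/decode round trip the tests
# above rely on; it assumes the public "Helsinki-NLP/opus-mt-en-de" checkpoint
# is reachable and that torch is installed.
if __name__ == "__main__":
    tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    batch = tok(["I am a small frog"], return_tensors="pt")
    # decoding with skip_special_tokens=True strips </s> and padding again
    print(tok.decode(batch["input_ids"][0], skip_special_tokens=True))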
| 56 | '''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = B'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
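# A hedged sketch of how the generated module is typically consumed; the
# ModelProto class is injected into this module's globals by the builder call
# above, and the .model path below is hypothetical.
if __name__ == "__main__":
    m = ModelProto()
    with open("spiece.model", "rb") as f:
        m.ParseFromString(f.read())
    print(m.trainer_spec.model_type, len(m.pieces))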
| 660 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=1_8, min_resolution=3_0, max_resolution=4_0_0, do_resize=True, size=None, do_center_crop=True, crop_size=None):
        size = size if size is not None else {'shortest_edge': 2_0}
        crop_size = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'crop_size'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 2_0})
        self.assertEqual(image_processor.crop_size, {'height': 1_8, 'width': 1_8})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2, crop_size=8_4)
        self.assertEqual(image_processor.size, {'shortest_edge': 4_2})
        self.assertEqual(image_processor.crop_size, {'height': 8_4, 'width': 8_4})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ))

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ))

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ))
| 57 | '''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 1_2, 7_6_8)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
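# A hedged sketch of the same forward pass outside the test harness; it assumes
# flax is installed so the conditional imports at the top of this file ran.
if __name__ == "__main__":
    model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
    tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
    input_ids = jnp.array([tokenizer.encode("The dog is cute and lives in the garden house")])
    hidden_states = model(input_ids)["last_hidden_state"]
    print(hidden_states.shape)  # expected: (1, 12, 768)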
| 660 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
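# A hedged illustration of what the lazy module above buys: touching only the
# config materializes configuration_whisper and imports none of torch, tf, or
# flax. The class name comes from the import structure defined in this file.
if __name__ == "__main__":
    from transformers import WhisperConfig

    config = WhisperConfig()  # default hyperparameters
    print(config.model_type)  # "whisper"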
| 58 | '''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
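# A hedged usage sketch. The checkpoint and config paths are hypothetical; only
# the flag names come from the argparse definition above.
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path ./models/117M/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch \
#       --gpt2_config_file ./models/117M/hparams.json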
| 660 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "convbert"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-1_2, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
])
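# A small, hedged usage sketch, guarded so importing this module stays
# side-effect free; it only exercises the two classes defined above.
if __name__ == "__main__":
    config = ConvBertConfig(num_hidden_layers=6, conv_kernel_size=9)
    onnx_config = ConvBertOnnxConfig(config)
    print(config.model_type, config.head_ratio, dict(onnx_config.inputs))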
| 59 | '''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
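# A hedged sketch of how the decorators and metaclass above are combined; the
# demo class and its key bindings are illustrative, not part of the library.
if __name__ == "__main__":

    @register
    class DemoMenu:
        current_selection = 0

        @mark(ord("q"))
        def quit_(cls):
            return "quit"

        @mark_multiple(ord("j"), ord("k"))
        def move(cls):
            return chr(cls.current_selection)

    # DemoMenu.handle_input() blocks on get_character() and dispatches on q/j/k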
| 660 | 0 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
    args = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
for k, v in counter.items():
        counts[k] = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
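# A hedged sketch of how these counts are consumed downstream: the distillation
# trainer smooths them into MLM masking probabilities. The 0.7 exponent mirrors
# the usual `--mlm_smoothing` default and should be treated as an assumption.
#
#   import numpy as np
#   token_probs = np.maximum(counts, 1) ** -0.7  # rarer tokens get masked more often
#   token_probs = token_probs / token_probs.sum()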
| 60 | '''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(self, parent, d_model=1_6, batch_size=1_3, prediction_length=7, context_length=1_4, label_length=1_0, cardinality=1_9, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=1_6, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], moving_average=2_5, autocorrelation_factor=5):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1)
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device)
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1)
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1)
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state)[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")
            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ])
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim])
            out_len = len(outputs)
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim])
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim])
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 2, len(outputs))
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim])

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def lowerCamelCase__ ( A_="train-batch.pt" ):
UpperCAmelCase_ = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=A_ , repo_type="dataset" )
UpperCAmelCase_ = torch.load(A_ , map_location=A_ )
return batch
@require_torch
@slow
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch()
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
UpperCAmelCase_ = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
UpperCAmelCase_ = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
UpperCAmelCase_ = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCamelCase__ )
UpperCAmelCase_ = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase__ , rtol=1e-1 ) )
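# Note: `outputs.sequences` has shape (batch, num_parallel_samples, prediction_length);
# taking the mean over dim=1 collapses the sampled trajectories into the point forecast
# that is compared against the reference values above.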
| 660 | 0 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
def next_term( a_i , k , i , n ):
"""simple docstring"""
lowerCAmelCase__ = sum(a_i[j] for j in range(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) )
lowerCAmelCase__ = sum(a_i[j] * base[j] for j in range(min(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) ) )
lowerCAmelCase__ , lowerCAmelCase__ = 0, 0
lowerCAmelCase__ = n - i
lowerCAmelCase__ = memo.get(lowerCAmelCase_ )
if sub_memo is not None:
lowerCAmelCase__ = sub_memo.get(lowerCAmelCase_ )
if jumps is not None and len(lowerCAmelCase_ ) > 0:
# find and make the largest jump without going over
lowerCAmelCase__ = -1
for _k in range(len(lowerCAmelCase_ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowerCAmelCase__ = _k
break
if max_jump >= 0:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = jumps[max_jump]
# since the difference between jumps is cached, add c
lowerCAmelCase__ = diff + c
for j in range(min(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) ):
lowerCAmelCase__ , lowerCAmelCase__ = divmod(lowerCAmelCase_ , 10 )
if new_c > 0:
add(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
lowerCAmelCase__ = []
else:
lowerCAmelCase__ = {c: []}
lowerCAmelCase__ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowerCAmelCase__ , lowerCAmelCase__ = next_term(lowerCAmelCase_ , k - 1 , i + dn , lowerCAmelCase_ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowerCAmelCase__ , lowerCAmelCase__ = compute(lowerCAmelCase_ , lowerCAmelCase_ , i + dn , lowerCAmelCase_ )
diff += _diff
dn += terms_jumped
lowerCAmelCase__ = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowerCAmelCase__ = 0
while j < len(lowerCAmelCase_ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCAmelCase_ , (diff, dn, k) )
return (diff, dn)
def compute( a_i , k , i , n ):
"""simple docstring"""
if i >= n:
return 0, i
if k > len(lowerCAmelCase_ ):
a_i.extend([0 for _ in range(k - len(lowerCAmelCase_ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowerCAmelCase__ = i
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 0, 0, 0
for j in range(len(lowerCAmelCase_ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowerCAmelCase__ = ds_c + ds_b
diff += addend
lowerCAmelCase__ = 0
for j in range(lowerCAmelCase_ ):
lowerCAmelCase__ = a_i[j] + addend
lowerCAmelCase__ , lowerCAmelCase__ = divmod(lowerCAmelCase_ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return diff, i - start_i
def add( digits , k , addend ):
"""simple docstring"""
for j in range(lowerCAmelCase_ , len(lowerCAmelCase_ ) ):
lowerCAmelCase__ = digits[j] + addend
if s >= 10:
lowerCAmelCase__ , lowerCAmelCase__ = divmod(lowerCAmelCase_ , 10 )
lowerCAmelCase__ = addend // 10 + quotient
else:
lowerCAmelCase__ = s
lowerCAmelCase__ = addend // 10
if addend == 0:
break
while addend > 0:
lowerCAmelCase__ , lowerCAmelCase__ = divmod(lowerCAmelCase_ , 10 )
digits.append(lowerCAmelCase_ )
def solution( n: int = 10**15 ):
"""simple docstring"""
lowerCAmelCase__ = [1]
lowerCAmelCase__ = 1
lowerCAmelCase__ = 0
while True:
lowerCAmelCase__ , lowerCAmelCase__ = next_term(lowerCAmelCase_ , 20 , i + dn , lowerCAmelCase_ )
dn += terms_jumped
if dn == n - i:
break
lowerCAmelCase__ = 0
for j in range(len(lowerCAmelCase_ ) ):
a_n += digits[j] * 10**j
return a_n
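# A brute-force cross-check for small n (an illustrative helper, not part of the
# upstream module; the name `solution_naive` is made up here): it recomputes the
# sequence directly from a(1) = 1 with a(i+1) = a(i) + digitsum(a(i)).
def solution_naive(n: int = 1_0_0_0) -> int:
    a_n = 1
    for _ in range(n - 1):
        a_n += sum(int(digit) for digit in str(a_n))
    return a_n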
if __name__ == "__main__":
print(F"""{solution() = }""")
| 61 | '''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
with open(emoji_file , "r" , encoding="utf-8" ) as f:
UpperCAmelCase_ = json.loads(f.read() )
UpperCAmelCase_ = collections.OrderedDict()
UpperCAmelCase_ = collections.OrderedDict()
UpperCAmelCase_ = collections.OrderedDict()
with open(A_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(A_ ):
UpperCAmelCase_ = b
UpperCAmelCase_ = idx
for wd in b:
UpperCAmelCase_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
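# For reference, the on-disk formats this loader expects (inferred from the parsing
# logic above, not from separate documentation): `emoji.json` is a plain JSON dict, and
# each line of `vocab.txt` is either a single token or a comma-separated group of
# surface forms that share one id, e.g.
#   single_token
#   form_a,form_b,form_c
# Every entry on one line maps to the same index in `vocab`.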
class GPTNeoXJapaneseTokenizer( PreTrainedTokenizer ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **UpperCamelCase__ , ) -> int:
"""simple docstring"""
super().__init__(
unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , do_clean_text=UpperCamelCase__ , **UpperCamelCase__ , )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(
F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(
F"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
UpperCAmelCase_ = do_clean_text
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = load_vocab_and_emoji(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return len(self.raw_vocab )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(UpperCamelCase__ , clean=self.do_clean_text )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> int:
"""simple docstring"""
return self.vocab.get(UpperCamelCase__ , self.vocab.get(self.unk_token ) )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = "".join(UpperCamelCase__ ).strip()
return out_string
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
UpperCAmelCase_ = input_ids[-self.model_max_length :]
return input_ids
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase_ = 0
if os.path.isdir(UpperCamelCase__ ):
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
UpperCAmelCase_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
UpperCAmelCase_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
UpperCAmelCase_ = token_index
writer.write(",".join(UpperCamelCase__ ) + "\n" )
index += 1
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , UpperCamelCase__ )
return vocab_file, emoji_file
class SubWordJapaneseTokenizer( object ):
def __init__( self , vocab , ids_to_tokens , emoji ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = vocab # same as swe
UpperCAmelCase_ = ids_to_tokens # same as bpe
UpperCAmelCase_ = emoji
UpperCAmelCase_ = np.max([len(UpperCamelCase__ ) for w in self.vocab.keys()] )
UpperCAmelCase_ = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
UpperCAmelCase_ = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
UpperCAmelCase_ = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
UpperCAmelCase_ = re.compile(
R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
UpperCAmelCase_ = re.compile(
R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
UpperCAmelCase_ = re.compile(
R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
UpperCAmelCase_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
UpperCAmelCase_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
UpperCAmelCase_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ) -> int:
"""simple docstring"""
return len(self.ids_to_tokens )
def clean_text( self , content ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = self.content_repattera.sub("<URL>" , UpperCamelCase__ )
UpperCAmelCase_ = self.content_repattera.sub("<EMAIL>" , UpperCamelCase__ )
UpperCAmelCase_ = self.content_repattera.sub("<TEL>" , UpperCamelCase__ )
UpperCAmelCase_ = self.content_repattera.sub("<DATE>" , UpperCamelCase__ )
UpperCAmelCase_ = self.content_repattera.sub("<DATE>" , UpperCamelCase__ )
UpperCAmelCase_ = self.content_repattera.sub("<PRICE>" , UpperCamelCase__ )
UpperCAmelCase_ = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
UpperCAmelCase_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def tokenize( self , text , clean=False ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = text.replace(" " , "<SP>" )
UpperCAmelCase_ = text.replace(" " , "<SP>" )
UpperCAmelCase_ = text.replace("\r\n" , "<BR>" )
UpperCAmelCase_ = text.replace("\n" , "<BR>" )
UpperCAmelCase_ = text.replace("\r" , "<BR>" )
UpperCAmelCase_ = text.replace("\t" , "<TAB>" )
UpperCAmelCase_ = text.replace("—" , "ー" )
UpperCAmelCase_ = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
UpperCAmelCase_ = text.replace(UpperCamelCase__ , UpperCamelCase__ )
if clean:
UpperCAmelCase_ = self.clean_text(UpperCamelCase__ )
def check_simbol(UpperCamelCase__ ):
UpperCAmelCase_ = x.encode()
if len(UpperCamelCase__ ) == 1 and len(UpperCamelCase__ ) == 2:
UpperCAmelCase_ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc_2_a_1 and c <= 0Xc_2_b_f)
or (c >= 0Xc_7_8_0 and c <= 0Xc_7_8_3)
or (c >= 0Xc_a_b_9 and c <= 0Xc_b_b_f)
or (c >= 0Xc_c_8_0 and c <= 0Xc_d_a_2)
):
return True
return False
def checkuae(UpperCamelCase__ ):
UpperCAmelCase_ = x.encode()
if len(UpperCamelCase__ ) == 1 and len(UpperCamelCase__ ) == 3:
UpperCAmelCase_ = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe_2_8_0_8_0 and c <= 0Xe_2_b_0_7_f:
return True
return False
UpperCAmelCase_ = 0
UpperCAmelCase_ = []
while pos < len(UpperCamelCase__ ):
UpperCAmelCase_ = min(len(UpperCamelCase__ ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
UpperCAmelCase_ = [] # (token_id, token, pos)
for e in range(UpperCamelCase__ , UpperCamelCase__ , -1 ):
UpperCAmelCase_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(UpperCamelCase__ ) > 2:
UpperCAmelCase_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(UpperCamelCase__ ) > 0:
# the smallest token_id is adopted
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[0] )[0]
result.append(UpperCamelCase__ )
UpperCAmelCase_ = e
else:
UpperCAmelCase_ = pos + 1
UpperCAmelCase_ = text[pos:end]
if check_simbol(UpperCamelCase__ ):
result.append("<KIGOU>" )
elif checkuae(UpperCamelCase__ ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
UpperCAmelCase_ = end
return result
def convert_id_to_token( self , index , breakline="\n" ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(UpperCamelCase__ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = "".join(UpperCamelCase__ )
return text
| 660 | 0 |
def bin_to_octal(bin_string):
    """simple docstring"""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
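# Illustrative examples, hand-checked against the grouping logic above:
#   bin_to_octal("111")    -> "7"
#   bin_to_octal("1111")   -> "17"   (padded to "001111", groups "001" and "111")
#   bin_to_octal("101010") -> "52"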
if __name__ == "__main__":
from doctest import testmod
testmod()
| 62 | '''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main( ):
UpperCAmelCase_ = Github(os.environ["GITHUB_TOKEN"] )
UpperCAmelCase_ = g.get_repo("huggingface/diffusers" )
UpperCAmelCase_ = repo.get_issues(state="open" )
for issue in open_issues:
UpperCAmelCase_ = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
UpperCAmelCase_ = comments[0] if len(A_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 660 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class BloomTokenizerFast( PreTrainedTokenizerFast ):
"""simple docstring"""
a : Dict = VOCAB_FILES_NAMES
a : Tuple = PRETRAINED_VOCAB_FILES_MAP
a : Any = ['input_ids', 'attention_mask']
a : List[Any] = None
def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , add_prefix_space=False , clean_up_tokenization_spaces=False , **__lowercase , ) -> Any:
super().__init__(
__lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , add_prefix_space=__lowercase , clean_up_tokenization_spaces=__lowercase , **__lowercase , )
__UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __lowercase ) != add_prefix_space:
__UpperCAmelCase : Optional[Any] = getattr(__lowercase , pre_tok_state.pop("""type""" ) )
__UpperCAmelCase : Optional[Any] = add_prefix_space
__UpperCAmelCase : int = pre_tok_class(**__lowercase )
__UpperCAmelCase : int = add_prefix_space
def UpperCAmelCase ( self : Optional[Any] , *__lowercase : Tuple , **__lowercase : Optional[Any] ) -> BatchEncoding:
__UpperCAmelCase : List[str] = kwargs.get("""is_split_into_words""" , __lowercase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*__lowercase , **__lowercase )
def UpperCAmelCase ( self : Tuple , *__lowercase : Optional[int] , **__lowercase : Optional[int] ) -> BatchEncoding:
__UpperCAmelCase : int = kwargs.get("""is_split_into_words""" , __lowercase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._encode_plus(*__lowercase , **__lowercase )
def UpperCAmelCase ( self : Optional[int] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
__UpperCAmelCase : Union[str, Any] = self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
def UpperCAmelCase ( self : Any , __lowercase : "Conversation" ) -> List[int]:
__UpperCAmelCase : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__lowercase , add_special_tokens=__lowercase ) + [self.eos_token_id] )
if len(__lowercase ) > self.model_max_length:
__UpperCAmelCase : Optional[Any] = input_ids[-self.model_max_length :]
return input_ids
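# Usage sketch (checkpoint id taken from the map above): because the prefix-space
# behaviour is baked into the serialized pre-tokenizer, add_prefix_space must be set at
# construction time, roughly:
#   tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m", add_prefix_space=True)
#   tok(["Hello", "world"], is_split_into_words=True)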
| 63 | '''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig( datasets.BuilderConfig ):
a_ = 1_0000
a_ = None
a_ = None
class Parquet( datasets.ArrowBasedBuilder ):
a_ = ParquetConfig
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCAmelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase__ , (str, list, tuple) ):
UpperCAmelCase_ = data_files
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase_ = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(UpperCamelCase__ ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = datasets.Features.from_arrow_schema(pq.read_schema(UpperCamelCase__ ) )
break
splits.append(datasets.SplitGenerator(name=UpperCamelCase__ , gen_kwargs={"files": files} ) )
return splits
def _cast_table( self , pa_table ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ = table_cast(UpperCamelCase__ , self.info.features.arrow_schema )
return pa_table
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase__ ) ):
with open(UpperCamelCase__ , "rb" ) as f:
UpperCAmelCase_ = pq.ParquetFile(UpperCamelCase__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
UpperCAmelCase_ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(UpperCamelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCamelCase__ )}: {e}""" )
raise
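# Minimal usage sketch (file names are placeholders): this builder is what backs
# `load_dataset("parquet", ...)`, e.g.
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "train-00000.parquet"})
# The `batch_size` and `columns` read above come from the ParquetConfig fields of the
# same names.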
| 660 | 0 |
def solution(max_base: int = 10, max_power: int = 22) -> int:
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
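# Worked check of the counting logic: 9**5 = 59049 has exactly 5 digits, so it counts,
# while any base >= 10 is excluded because 10**n already has n + 1 digits; bounding the
# powers at 22 is safe since 9**22 has only 21 digits.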
| 64 | '''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''AI-Sweden/gpt-sw3-126m''': 20_48,
'''AI-Sweden/gpt-sw3-350m''': 20_48,
'''AI-Sweden/gpt-sw3-1.6b''': 20_48,
'''AI-Sweden/gpt-sw3-6.7b''': 20_48,
'''AI-Sweden/gpt-sw3-20b''': 20_48,
}
class GPTSw3Tokenizer( PreTrainedTokenizer ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs = None , **UpperCamelCase__ , ) -> None:
"""simple docstring"""
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase_ = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
UpperCAmelCase_ = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
UpperCAmelCase_ = "<|endoftext|>" if eos_token is None else eos_token
UpperCAmelCase_ = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
UpperCAmelCase_ = unk_token if pad_token is None else pad_token
UpperCAmelCase_ = eos_token if bos_token is None else bos_token
else:
UpperCAmelCase_ = "<pad>" if pad_token is None else pad_token
UpperCAmelCase_ = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
# Used for whitespace normalization in input texts
# fmt: off
UpperCAmelCase_ = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
UpperCAmelCase_ = re.compile(
F"""[{"".join(map(UpperCamelCase__ , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" )
def __getstate__( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def vocab_size( self ) -> int:
"""simple docstring"""
return len(self.sp_model )
def preprocess_text( self , text ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.non_printing_characters_re.sub("" , UpperCamelCase__ )
# Normalize whitespaces
UpperCAmelCase_ = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
UpperCAmelCase_ = unicodedata.normalize("NFC" , UpperCamelCase__ )
return text
def lowerCamelCase_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.preprocess_text(UpperCamelCase__ )
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCamelCase__ )
@staticmethod
def lowerCamelCase_ ( UpperCamelCase__ ) -> str:
"""simple docstring"""
return out_string
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = ""
UpperCAmelCase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
UpperCAmelCase_ = True
UpperCAmelCase_ = []
else:
current_sub_tokens.append(UpperCamelCase__ )
UpperCAmelCase_ = False
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string
def lowerCamelCase_ ( self ) -> Dict[str, int]:
"""simple docstring"""
UpperCAmelCase_ = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
"""simple docstring"""
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = self.preprocess_text(UpperCamelCase__ )
UpperCAmelCase_ = self.sp_model.encode(UpperCamelCase__ )
else:
UpperCAmelCase_ = [self.preprocess_text(UpperCamelCase__ ) for t in text]
UpperCAmelCase_ = self.sp_model.encode(UpperCamelCase__ )
if return_tensors is True or return_tensors == "pt":
UpperCAmelCase_ = torch.tensor(UpperCamelCase__ )
return token_ids
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
return self.sp_model.decode(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[int]:
"""simple docstring"""
UpperCAmelCase_ = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
UpperCAmelCase_ = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(UpperCamelCase__ ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=UpperCamelCase__ )
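# Illustrative example of the prompt assembled above, using the default special tokens
# (eos "<|endoftext|>", bos "<s>"): a user turn "Hej" followed by a bot turn "Hej!"
# is encoded from the string
#   "<|endoftext|><s>User: Hej<s>Bot: Hej!<s>Bot:"
# i.e. eos + bos, turns joined by bos, ending with "Bot:" to cue the next reply.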
| 660 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__UpperCamelCase , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__UpperCamelCase , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__UpperCamelCase )
return parser.parse_args()
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = parse_args()
# Import training_script as a module.
UpperCAmelCase__ : Union[str, Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
UpperCAmelCase__ : Any = script_fpath.stem
UpperCAmelCase__ : str = importlib.import_module(__UpperCamelCase )
# Patch sys.argv
UpperCAmelCase__ : Union[str, Any] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
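# Typical invocation (script and argument names are illustrative):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
# The launcher imports the target script as a module, appends --tpu_num_cores to its
# argv, and spawns the script's `_mp_fn` once per TPU core via xmp.spawn.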
| 65 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 1_8}
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
def prepare_image_processor_dict( self ) -> Union[str, Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
a_ = LevitImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = LevitImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 660 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
_lowercase : Optional[Any] = size if size is not None else {'shortest_edge': 1_8}
_lowercase : str = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
_lowercase : int = parent
_lowercase : List[str] = batch_size
_lowercase : List[Any] = num_channels
_lowercase : Any = image_size
_lowercase : List[Any] = min_resolution
_lowercase : List[Any] = max_resolution
_lowercase : Dict = do_resize
_lowercase : Optional[int] = size
_lowercase : int = do_center_crop
_lowercase : Optional[Any] = crop_size
_lowercase : Any = do_normalize
_lowercase : Union[str, Any] = image_mean
_lowercase : List[str] = image_std
def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
_UpperCamelCase : Tuple = LevitImageProcessor if is_vision_available() else None
def __a ( self ):
_lowercase : Tuple = LevitImageProcessingTester(self )
@property
def __a ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self ):
_lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'size' ) )
def __a ( self ):
_lowercase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 1_8} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
_lowercase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def __a ( self ):
pass
def __a ( self ):
# Initialize image_processing
_lowercase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
_lowercase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowercase : int = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __a ( self ):
# Initialize image_processing
_lowercase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
_lowercase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowercase : Optional[int] = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __a ( self ):
# Initialize image_processing
_lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
_lowercase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowercase : Union[str, Any] = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 66 | '''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
def f(x):
    return math.sin(10 * x)
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
i = 10
while i <= 10_00_00:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
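# line_length implements the polyline arc-length approximation
#   L ~= sum over i of sqrt(dx**2 + (f(x_i + dx) - f(x_i))**2),  dx = (x_end - x_start) / steps,
# which converges to the integral of sqrt(1 + f'(x)**2) dx as steps grows.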
| 660 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig( PretrainedConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE_ : int = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] ,__A : List[Any]=5_0265 ,__A : str=512 ,__A : Optional[int]=8 ,__A : Any=2048 ,__A : Tuple=16 ,__A : str=8 ,__A : int=2048 ,__A : List[str]=16 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : int=True ,__A : List[Any]=True ,__A : Tuple="gelu" ,__A : Any=512 ,__A : Dict=0.1 ,__A : Tuple=0.0 ,__A : int=0.0 ,__A : int=0.02 ,__A : Dict=1 ,__A : str=False ,__A : Dict=0 ,__A : Union[str, Any]=1 ,__A : Optional[int]=2 ,__A : List[str]=2 ,**__A : Tuple ,) -> Tuple:
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = d_model
_lowercase = encoder_ffn_dim
_lowercase = encoder_layers
_lowercase = encoder_attention_heads
_lowercase = decoder_ffn_dim
_lowercase = decoder_layers
_lowercase = decoder_attention_heads
_lowercase = dropout
_lowercase = attention_dropout
_lowercase = activation_dropout
_lowercase = activation_function
_lowercase = init_std
_lowercase = encoder_layerdrop
_lowercase = decoder_layerdrop
_lowercase = use_cache
_lowercase = encoder_layers
_lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,forced_eos_token_id=__A ,**__A ,)
class BlenderbotSmallOnnxConfig( OnnxSeq2SeqConfigWithPast ):
"""simple docstring"""
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase = {0: 'batch'}
_lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__A ,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def outputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super().outputs
else:
_lowercase = super(__A ,self ).outputs
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _generate_dummy_inputs_for_default_and_seq2seq_lm( self ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
# Generate decoder inputs
_lowercase = seq_length if not self.use_past else 1
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
_lowercase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_lowercase = dict(**__A ,**__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
_lowercase = common_inputs['decoder_input_ids'].shape[1]
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = decoder_seq_length + 3
_lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowercase = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__A ,__A )] ,dim=1 )
_lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowercase , _lowercase = self.num_layers
_lowercase = min(__A ,__A )
_lowercase = max(__A ,__A ) - min_num_layers
_lowercase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__A ):
common_inputs["past_key_values"].append(
(
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
) )
# TODO: test this.
_lowercase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__A ,__A ):
common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
return common_inputs
def _generate_dummy_inputs_for_causal_lm( self ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowercase = seqlen + 2
_lowercase , _lowercase = self.num_layers
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = common_inputs['attention_mask'].dtype
_lowercase = torch.cat(
[common_inputs['attention_mask'], torch.ones(__A ,__A ,dtype=__A )] ,dim=1 )
_lowercase = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
]
return common_inputs
def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase = tokenizer.num_special_tokens_to_add(__A )
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
_lowercase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowercase = dict(tokenizer(__A ,return_tensors=__A ) )
return common_inputs
def generate_dummy_inputs( self ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
elif self.task == "causal-lm":
_lowercase = self._generate_dummy_inputs_for_causal_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
else:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
return common_inputs
def __UpperCAmelCase ( self : List[str] ,__A : Dict ,__A : Any ,__A : List[Any] ,__A : Tuple ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super()._flatten_past_key_values_(__A ,__A ,__A ,__A )
else:
_lowercase = super(__A ,self )._flatten_past_key_values_(
__A ,__A ,__A ,__A ) | 67 | '''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def _info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def _ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 660 | 0 |
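The `_get_dirs` method in the cell above synthesizes directory entries from a flat sibling list via `PurePosixPath(...).parents`. A minimal standalone sketch of that trick, with toy file names and no Hugging Face dependency (illustrative only):

from pathlib import PurePosixPath

files = ["data/train/part-0.parquet", "data/valid/part-0.parquet", "README.md"]
dir_cache = {}
for rfilename in files:
    dir_cache[rfilename] = {"name": rfilename, "size": None, "type": "file"}
    # every strict ancestor except the trailing "." root becomes a directory entry
    dir_cache.update(
        {str(d): {"name": str(d), "size": None, "type": "directory"} for d in list(PurePosixPath(rfilename).parents)[:-1]}
    )

print(sorted(dir_cache))
# ['README.md', 'data', 'data/train', 'data/train/part-0.parquet', 'data/valid', 'data/valid/part-0.parquet']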
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a list of tokens, or None if the snippet is too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
| 68 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 660 | 0 |
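A hedged usage sketch for the MinHash deduplication pipeline two cells above: it assumes the names as restored (`deduplicate_dataset`, row columns `content`, `repo_name`, `path`) and that `datasets`, `datasketch`, and `dpu_utils` are installed; the toy rows and threshold are illustrative only.

from datasets import Dataset

rows = {
    "content": [
        "def add(a, b):\n    return a + b\n" * 3,  # long enough to clear MIN_NUM_TOKENS
        "def add(a, b):\n    return a + b\n" * 3,  # exact duplicate of the row above
        "print('hi')",  # too short to be hashed, passes through untouched
    ],
    "repo_name": ["org/repo1", "org/repo2", "org/repo3"],
    "path": ["a.py", "b.py", "c.py"],
}
ds = Dataset.from_dict(rows)

# near-duplicates collapse to one "extreme" per cluster; unique files are kept
ds_filter, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)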
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
| 69 | '''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value_str2 = rule_str.split(":")
        steps = int(value_str)
        value = float(value_str2)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 660 | 0 |
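A small sketch of how the warmup schedules above behave, assuming the name `get_linear_schedule_with_warmup` as restored in the scheduler cell; the toy model and step counts are illustrative only.

import torch

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)

lrs = []
for _ in range(100):
    optimizer.step()
    scheduler.step()
    lrs.append(optimizer.param_groups[0]["lr"])

# linear ramp up to 1e-3 over the first 10 steps, then linear decay to 0 at step 100
assert abs(lrs[9] - 1e-3) < 1e-9 and lrs[-1] < 1e-4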
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 70 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 660 | 0 |
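A quick check of the bidirectional Dijkstra cell two rows above, assuming the names as restored there (`bidirectional_dij`, `graph_fwd`, `graph_bwd`); illustrative only.

# E -> G (cost 2) -> F (cost 1): total cost 3
print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # expected 3
# unreachable pairs keep the -1 sentinel
print(bidirectional_dij("F", "B", graph_fwd, graph_bwd))  # expected -1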
def merge_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 71 | '''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
| 660 | 0 |
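Despite its name, the `merge_sort` two cells above is a min/max selection scheme: each pass moves the current minimum to a prefix list and the current maximum to a suffix list. A small sanity check, assuming the name as restored; illustrative only.

data = [5, 3, 8, 1, 9, 2]
assert merge_sort(data.copy()) == sorted([5, 3, 8, 1, 9, 2])
# odd-length input leaves the median in `collection` and still sorts correctly
assert merge_sort([3, 1, 2]) == [1, 2, 3]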
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
| 72 | '''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
| 660 | 0 |
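A toy walk-through of the BPE merge loop from the Speech2Text2 tokenizer cell above, reusing the restored `get_pairs` helper; the tiny merge table is illustrative only and stands in for a real `merges.txt`.

# rank table: lower rank merges first
bpe_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}
word = ("l", "o", "w</w>")

while True:
    pairs = get_pairs(word)
    candidates = [p for p in pairs if p in bpe_ranks]
    if not candidates:
        break
    first, second = min(candidates, key=bpe_ranks.get)
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            merged.append(first + second)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)

print(word)  # ('low</w>',)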
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 73 | '''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 660 | 0 |
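A brief check tying the two cells above together, assuming the restored names `manhattan_distance` and `area_under_curve_estimator`; the tolerance is illustrative.

assert manhattan_distance([1, 1], [2, 2]) == 2.0
assert manhattan_distance([1.5, 1.5], [2, 2]) == 1.0

# Monte Carlo integral of y = x on [0, 1] should be close to 0.5
estimate = area_under_curve_estimator(100_000, lambda x: x, 0.0, 1.0)
assert abs(estimate - 0.5) < 0.01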
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 74 | '''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
__snake_case : Optional[int] = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution"""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Print a 2D tensor"""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models"
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written."
    )

    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path"
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path"
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3"
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCAmelCase_ = mask_heads(A_ , A_ , A_ )
prune_heads(A_ , A_ , A_ , A_ )
if __name__ == "__main__":
main()
| 660 | 0 |
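The script above searches for a head mask and then prunes the masked heads in place. A minimal sketch of the pruning step it relies on, using the standard Transformers API; the model name and head indices below are illustrative, not taken from the script:

from transformers import BertModel

model = BertModel.from_pretrained("bert-base-uncased")

# Map each layer index to the list of attention heads to remove from it.
heads_to_prune = {0: [0, 2], 5: [1]}
model.prune_heads(heads_to_prune)

# Pruned heads are recorded on the config, so they stay pruned after save/load.
print(model.config.pruned_heads)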
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ) -> Optional[int]:
UpperCAmelCase__ : str = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
UpperCAmelCase__ : Optional[Any] = MaskFormerConfig(backbone_config=lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
UpperCAmelCase__ : Tuple = 8_47
UpperCAmelCase__ : int = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
UpperCAmelCase__ : Optional[int] = 1_50
UpperCAmelCase__ : Tuple = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
UpperCAmelCase__ : Any = 1_71
UpperCAmelCase__ : List[Any] = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
UpperCAmelCase__ : Optional[Any] = 1_33
UpperCAmelCase__ : Union[str, Any] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
UpperCAmelCase__ : Any = 19
UpperCAmelCase__ : str = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
UpperCAmelCase__ : Dict = 65
UpperCAmelCase__ : str = '''mapillary-vistas-id2label.json'''
UpperCAmelCase__ : Dict = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase__ : Dict = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
return config
def a__ ( lowerCAmelCase__ ) -> Tuple:
UpperCAmelCase__ : Union[str, Any] = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
UpperCAmelCase__ : Tuple = dct.pop(lowerCAmelCase__ )
UpperCAmelCase__ : Dict = val
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
UpperCAmelCase__ : List[str] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCAmelCase__ : Any = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCAmelCase__ : Dict = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
UpperCAmelCase__ : int = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ : Dict = in_proj_weight[:dim, :]
UpperCAmelCase__ : Optional[int] = in_proj_bias[: dim]
UpperCAmelCase__ : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
UpperCAmelCase__ : List[str] = in_proj_bias[
dim : dim * 2
]
UpperCAmelCase__ : int = in_proj_weight[
-dim :, :
]
UpperCAmelCase__ : Optional[Any] = in_proj_bias[-dim :]
# fmt: on
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
# fmt: off
UpperCAmelCase__ : Optional[Any] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase__ : Optional[int] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
UpperCAmelCase__ : Optional[int] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ : Any = in_proj_weight[: hidden_size, :]
UpperCAmelCase__ : str = in_proj_bias[:config.hidden_size]
UpperCAmelCase__ : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase__ : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase__ : List[str] = in_proj_weight[-hidden_size :, :]
UpperCAmelCase__ : Any = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase__ : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
UpperCAmelCase__ : Optional[int] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ : Any = in_proj_weight[: hidden_size, :]
UpperCAmelCase__ : Union[str, Any] = in_proj_bias[:config.hidden_size]
UpperCAmelCase__ : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase__ : int = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase__ : int = in_proj_weight[-hidden_size :, :]
UpperCAmelCase__ : str = in_proj_bias[-hidden_size :]
# fmt: on
def a__ ( ) -> torch.Tensor:
UpperCAmelCase__ : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase__ : Any = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> int:
UpperCAmelCase__ : str = get_maskformer_config(lowerCAmelCase__ )
# load original state_dict
with open(lowerCAmelCase__ , '''rb''' ) as f:
UpperCAmelCase__ : List[Any] = pickle.load(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
UpperCAmelCase__ : List[str] = create_rename_keys(lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_swin_q_k_v(lowerCAmelCase__ , config.backbone_config )
read_in_decoder_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# update to torch tensors
for key, value in state_dict.items():
UpperCAmelCase__ : Any = torch.from_numpy(lowerCAmelCase__ )
# load 🤗 model
UpperCAmelCase__ : Union[str, Any] = MaskFormerForInstanceSegmentation(lowerCAmelCase__ )
model.eval()
for name, param in model.named_parameters():
print(lowerCAmelCase__ , param.shape )
UpperCAmelCase__ , UpperCAmelCase__ : str = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCAmelCase__ ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
UpperCAmelCase__ : Union[str, Any] = prepare_img()
if "vistas" in model_name:
UpperCAmelCase__ : Optional[Any] = 65
elif "cityscapes" in model_name:
UpperCAmelCase__ : Optional[int] = 6_55_35
else:
UpperCAmelCase__ : List[Any] = 2_55
UpperCAmelCase__ : Any = True if '''ade''' in model_name else False
UpperCAmelCase__ : Tuple = MaskFormerImageProcessor(ignore_index=lowerCAmelCase__ , reduce_labels=lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = image_processor(lowerCAmelCase__ , return_tensors='''pt''' )
UpperCAmelCase__ : List[str] = model(**lowerCAmelCase__ )
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
UpperCAmelCase__ : List[Any] = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
image_processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCamelCase__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
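Once converted, the checkpoint behaves like any other hub model. A hedged usage sketch; facebook/maskformer-swin-tiny-ade is the official hub id for the checkpoint this script targets, and the image URL matches prepare_img above:

import requests
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-tiny-ade")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-tiny-ade")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Post-process into a (height, width) semantic segmentation map.
semantic_map = processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]
print(semantic_map.shape)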
| 75 | '''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case : str = logging.getLogger(__name__)
def lowerCamelCase__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=A_ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=A_ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=A_ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=A_ , default="data/dump" , help="The dump file prefix." )
UpperCAmelCase_ = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
UpperCAmelCase_ = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase_ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase_ = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase_ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase_ = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase_ = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCAmelCase_ = fp.readlines()
logger.info("Start encoding" )
logger.info(F"""{len(A_ )} examples to process.""" )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
UpperCAmelCase_ = 10_000
UpperCAmelCase_ = time.time()
for text in data:
UpperCAmelCase_ = F"""{bos} {text.strip()} {sep}"""
UpperCAmelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
rslt.append(A_ )
iter += 1
if iter % interval == 0:
UpperCAmelCase_ = time.time()
logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
UpperCAmelCase_ = time.time()
logger.info("Finished binarization" )
logger.info(F"""{len(A_ )} examples processed.""" )
UpperCAmelCase_ = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
UpperCAmelCase_ = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCAmelCase_ = [np.uintaa(A_ ) for d in rslt]
else:
UpperCAmelCase_ = [np.intaa(A_ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(A_ , "wb" ) as handle:
pickle.dump(rslt_ , A_ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 660 | 0 |
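The binarization script above stores token ids as uint16 whenever they fit, halving the size of the pickled dump. A small self-contained sketch of that dtype choice; the vocabulary size and token ids are made up for illustration:

import numpy as np

vocab_size = 30_522  # e.g. the bert-base-uncased vocabulary
token_ids = [101, 2023, 2003, 1037, 7099, 102]

# Token ids fit in uint16 iff the vocabulary has fewer than 2**16 entries.
dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
arr = np.array(token_ids, dtype=dtype)
print(arr.dtype, arr.nbytes)  # uint16 12 -- half of the int32 footprint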
"""simple docstring"""
from functools import lru_cache
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : str = 2
__lowercase : int = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__UpperCamelCase )
if n > 1:
factors.add(__UpperCamelCase )
return factors
@lru_cache
def __UpperCAmelCase ( __UpperCamelCase ):
return len(unique_prime_factors(__UpperCamelCase ) )
def __UpperCAmelCase ( __UpperCamelCase ):
return len(set(__UpperCamelCase ) ) in (0, 1)
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : Optional[Any] = 2
while True:
# Increment each value of a generated range
__lowercase : Dict = [base + i for i in range(__UpperCamelCase )]
# Run elements through our unique_prime_factors function
# Append our target number to the end.
__lowercase : Optional[int] = [upf_len(__UpperCamelCase ) for x in group]
checker.append(__UpperCamelCase )
# If all numbers in the list are equal, return the group variable.
if equality(__UpperCamelCase ):
return group
# Increment our base variable by 1
base += 1
def __UpperCAmelCase ( __UpperCamelCase = 4 ):
__lowercase : int = run(__UpperCamelCase )
return results[0] if len(__UpperCamelCase ) else None
if __name__ == "__main__":
print(solution())
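# Worked check, stated under the original function names (unique_prime_factors,
# run, solution) that the bodies above still call: for n = 2 the first pair of
# consecutive integers with two distinct prime factors each is 14 = 2 * 7 and
# 15 = 3 * 5, and the default solution() answers Project Euler 47 with 134043.
# assert unique_prime_factors(14) == {2, 7}
# assert unique_prime_factors(15) == {3, 5}
# assert solution(2) == 14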
| 76 | '''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__snake_case : Dict = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__snake_case : str = json.load(f)
@require_torch
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Dict:
"""simple docstring"""
return FSMTTokenizer.from_pretrained(UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> str:
"""simple docstring"""
UpperCAmelCase_ = FSMTForConditionalGeneration.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = F"""facebook/wmt19-{pair}"""
UpperCAmelCase_ = self.get_tokenizer(UpperCamelCase__ )
UpperCAmelCase_ = self.get_model(UpperCamelCase__ )
UpperCAmelCase_ = bleu_data[pair]["src"]
UpperCAmelCase_ = bleu_data[pair]["tgt"]
UpperCAmelCase_ = tokenizer(UpperCamelCase__ , return_tensors="pt" , truncation=UpperCamelCase__ , padding="longest" ).to(UpperCamelCase__ )
UpperCAmelCase_ = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase_ = tokenizer.batch_decode(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
UpperCAmelCase_ = calculate_bleu(UpperCamelCase__ , UpperCamelCase__ )
print(UpperCamelCase__ )
self.assertGreaterEqual(scores["bleu"] , UpperCamelCase__ )
| 660 | 0 |
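A hedged translation sketch with one of the WMT19 checkpoints the BLEU test above scores; the language pair and sentence are arbitrary examples:

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

batch = tokenizer(["Machine learning is great!"], return_tensors="pt")
generated = model.generate(**batch, num_beams=5)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))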
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
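A brief usage sketch of what the lazy structure above enables: the class is only materialized on first attribute access, so importing transformers stays cheap. The checkpoint id below is the public facebook/xmod-base model and is assumed, not taken from this file:

from transformers import XmodModel  # resolved lazily through _LazyModule

model = XmodModel.from_pretrained("facebook/xmod-base")
# X-MOD routes through language-specific adapters; select one before use.
model.set_default_language("en_XX")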
| 77 | '''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__snake_case : List[Any] = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : Optional[int] = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : int = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
__snake_case : Dict = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__snake_case : Tuple = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
__snake_case : str = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def lowerCamelCase__ ( A_ ):
if isinstance(A_ , A_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def lowerCamelCase__ ( A_ , A_ , A_ , A_ , A_=False ):
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def lowerCamelCase__ ( A_ , A_ , A_ , A_ , A_=None ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.bias"""]
UpperCAmelCase_ = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = (
checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def lowerCamelCase__ ( A_ , A_ ):
UpperCAmelCase_ = torch.load(A_ , map_location="cpu" )
UpperCAmelCase_ = {}
UpperCAmelCase_ = checkpoint["time_embed.0.weight"]
UpperCAmelCase_ = checkpoint["time_embed.0.bias"]
UpperCAmelCase_ = checkpoint["time_embed.2.weight"]
UpperCAmelCase_ = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
UpperCAmelCase_ = checkpoint["label_emb.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.weight"]
UpperCAmelCase_ = checkpoint["input_blocks.0.0.bias"]
UpperCAmelCase_ = unet_config["down_block_types"]
UpperCAmelCase_ = unet_config["layers_per_block"]
UpperCAmelCase_ = unet_config["attention_head_dim"]
UpperCAmelCase_ = unet_config["block_out_channels"]
UpperCAmelCase_ = 1
UpperCAmelCase_ = channels_list[0]
for i, layer_type in enumerate(A_ ):
UpperCAmelCase_ = channels_list[i]
UpperCAmelCase_ = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(A_ ):
UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(A_ ):
UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
UpperCAmelCase_ = F"""down_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
A_ , A_ , A_ , A_ , A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""down_blocks.{i}.downsamplers.0"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
current_layer += 1
UpperCAmelCase_ = current_channels
# hardcoded the mid-block for now
UpperCAmelCase_ = "mid_block.resnets.0"
UpperCAmelCase_ = "middle_block.0"
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = "mid_block.attentions.0"
UpperCAmelCase_ = "middle_block.1"
UpperCAmelCase_ = convert_attention(A_ , A_ , A_ , A_ , A_ )
UpperCAmelCase_ = "mid_block.resnets.1"
UpperCAmelCase_ = "middle_block.2"
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = 0
UpperCAmelCase_ = unet_config["up_block_types"]
for i, layer_type in enumerate(A_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.1"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ , has_skip=A_ )
UpperCAmelCase_ = F"""up_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
A_ , A_ , A_ , A_ , A_ )
current_layer += 1
if i != len(A_ ) - 1:
UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.2"""
UpperCAmelCase_ = convert_resnet(A_ , A_ , A_ , A_ )
UpperCAmelCase_ = checkpoint["out.0.weight"]
UpperCAmelCase_ = checkpoint["out.0.bias"]
UpperCAmelCase_ = checkpoint["out.2.weight"]
UpperCAmelCase_ = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
__snake_case : List[str] = parser.parse_args()
__snake_case : Any = strabool(args.class_cond)
__snake_case : List[str] = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
__snake_case : Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__snake_case : List[str] = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
__snake_case : Optional[Any] = None
__snake_case : Optional[int] = con_pt_to_diffuser(args.unet_path, unet_config)
__snake_case : str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__snake_case : Tuple = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__snake_case : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
__snake_case : Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config)
__snake_case : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 660 | 0 |
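A hedged sampling sketch for a converted checkpoint; openai/diffusers-cd_imagenet64_l2 is the published class-conditional ImageNet-64 distillation, so class_labels applies here (it would not for an unconditional LSUN checkpoint):

import torch
from diffusers import ConsistencyModelPipeline

pipe = ConsistencyModelPipeline.from_pretrained(
    "openai/diffusers-cd_imagenet64_l2", torch_dtype=torch.float16
)
pipe.to("cuda")

# Consistency distillation allows single-step sampling.
image = pipe(num_inference_steps=1, class_labels=145).images[0]
image.save("consistency_sample.png")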
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowerCAmelCase_ ( snake_case_ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
'''simple docstring'''
UpperCAmelCase_ = []
if isinstance(snake_case_ , snake_case_ ):
for v in tree.values():
shapes.extend(_fetch_dims(snake_case_ ) )
elif isinstance(snake_case_ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(snake_case_ ) )
elif isinstance(snake_case_ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Tuple[int, ...] ) -> Tuple[int, ...]:
'''simple docstring'''
UpperCAmelCase_ = []
for d in reversed(snake_case_ ):
idx.append(flat_idx % d )
UpperCAmelCase_ = flat_idx // d
return tuple(reversed(snake_case_ ) )
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Sequence[int] , snake_case_ : Optional[Sequence[bool]] = None , snake_case_ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
'''simple docstring'''
def reduce_edge_list(snake_case_ : List[bool] ) -> None:
UpperCAmelCase_ = True
for i in range(len(snake_case_ ) ):
UpperCAmelCase_ = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCAmelCase_ = l[reversed_idx]
if start_edges is None:
UpperCAmelCase_ = [s == 0 for s in start]
reduce_edge_list(snake_case_ )
if end_edges is None:
UpperCAmelCase_ = [e == (d - 1) for e, d in zip(snake_case_ , snake_case_ )]
reduce_edge_list(snake_case_ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(snake_case_ ) == 0:
return [()]
elif len(snake_case_ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
UpperCAmelCase_ = []
UpperCAmelCase_ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(snake_case_ , snake_case_ ):
if s == e:
path_list.append(slice(snake_case_ , s + 1 ) )
else:
break
UpperCAmelCase_ = tuple(snake_case_ )
UpperCAmelCase_ = len(snake_case_ )
# start == end, and we're done
if divergence_idx == len(snake_case_ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase_ = start[divergence_idx]
return tuple(
path + (slice(snake_case_ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase_ = end[divergence_idx]
return tuple(
path + (slice(snake_case_ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
UpperCAmelCase_ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def lowerCAmelCase_ ( snake_case_ : torch.Tensor , snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> torch.Tensor:
'''simple docstring'''
UpperCAmelCase_ = t.shape[:no_batch_dims]
UpperCAmelCase_ = list(_flat_idx_to_idx(snake_case_ , snake_case_ ) )
# _get_minimal_slice_set is inclusive
UpperCAmelCase_ = list(_flat_idx_to_idx(flat_end - 1 , snake_case_ ) )
# Get an ordered list of slices to perform
UpperCAmelCase_ = _get_minimal_slice_set(
snake_case_ , snake_case_ , snake_case_ , )
UpperCAmelCase_ = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def lowerCAmelCase_ ( snake_case_ : Callable , snake_case_ : Dict[str, Any] , snake_case_ : int , snake_case_ : int , snake_case_ : bool = False , snake_case_ : Any = None , snake_case_ : bool = False , ) -> Any:
'''simple docstring'''
if not (len(snake_case_ ) > 0):
raise ValueError("Must provide at least one input" )
UpperCAmelCase_ = [shape[:no_batch_dims] for shape in _fetch_dims(snake_case_ )]
UpperCAmelCase_ = tuple([max(snake_case_ ) for s in zip(*snake_case_ )] )
def _prep_inputs(snake_case_ : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
UpperCAmelCase_ = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
UpperCAmelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
UpperCAmelCase_ = tensor_tree_map(_prep_inputs , snake_case_ )
UpperCAmelCase_ = None
if _out is not None:
UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
UpperCAmelCase_ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCAmelCase_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(snake_case_ : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCAmelCase_ = 0
UpperCAmelCase_ = prepped_outputs
for _ in range(snake_case_ ):
# Chunk the input
if not low_mem:
UpperCAmelCase_ = _select_chunk
else:
UpperCAmelCase_ = partial(
_chunk_slice , flat_start=snake_case_ , flat_end=min(snake_case_ , i + chunk_size ) , no_batch_dims=len(snake_case_ ) , )
UpperCAmelCase_ = tensor_tree_map(snake_case_ , snake_case_ )
# Run the layer on the chunk
UpperCAmelCase_ = layer(**snake_case_ )
# Allocate space for the output
if out is None:
UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , snake_case_ )
# Put the chunk in its pre-allocated space
if isinstance(snake_case_ , snake_case_ ):
def assign(snake_case_ : dict , snake_case_ : dict ) -> None:
for k, v in da.items():
if isinstance(snake_case_ , snake_case_ ):
assign(snake_case_ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCAmelCase_ = da[k]
assign(snake_case_ , snake_case_ )
elif isinstance(snake_case_ , snake_case_ ):
for xa, xa in zip(snake_case_ , snake_case_ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCAmelCase_ = xa
elif isinstance(snake_case_ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCAmelCase_ = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
UpperCAmelCase_ = tensor_tree_map(lambda snake_case_ : t.view(orig_batch_dims + t.shape[1:] ) , snake_case_ )
return out
class __A :
def __init__(self : Dict , __a : int = 512 , ):
UpperCAmelCase_ = max_chunk_size
UpperCAmelCase_ = None
UpperCAmelCase_ = None
def _lowercase (self : List[Any] , __a : Callable , __a : tuple , __a : int ):
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCAmelCase_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
UpperCAmelCase_ = [c for c in candidates if c > min_chunk_size]
UpperCAmelCase_ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__a : int ) -> bool:
try:
with torch.no_grad():
fn(*__a , chunk_size=__a )
return True
except RuntimeError:
return False
UpperCAmelCase_ = 0
UpperCAmelCase_ = len(__a ) - 1
while i > min_viable_chunk_size_index:
UpperCAmelCase_ = test_chunk_size(candidates[i] )
if not viable:
UpperCAmelCase_ = (min_viable_chunk_size_index + i) // 2
else:
UpperCAmelCase_ = i
UpperCAmelCase_ = (i + len(__a ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _lowercase (self : int , __a : Iterable , __a : Iterable ):
UpperCAmelCase_ = True
for aa, aa in zip(__a , __a ):
assert type(__a ) == type(__a )
if isinstance(__a , (list, tuple) ):
consistent &= self._compare_arg_caches(__a , __a )
elif isinstance(__a , __a ):
UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )]
UpperCAmelCase_ = [v for _, v in sorted(aa.items() , key=lambda __a : x[0] )]
consistent &= self._compare_arg_caches(__a , __a )
else:
consistent &= aa == aa
return consistent
def _lowercase (self : List[str] , __a : Callable , __a : tuple , __a : int , ):
UpperCAmelCase_ = True
UpperCAmelCase_ = tree_map(lambda __a : a.shape if isinstance(__a , torch.Tensor ) else a , __a , __a )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__a )
UpperCAmelCase_ = self._compare_arg_caches(self.cached_arg_data , __a )
else:
# Otherwise, we can reuse the precomputed value
UpperCAmelCase_ = False
if not consistent:
UpperCAmelCase_ = self._determine_favorable_chunk_size(
__a , __a , __a , )
UpperCAmelCase_ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
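A hedged usage sketch of the chunked-apply helper defined above (chunk_layer in the original transformers/OpenFold source; its name is obfuscated here). Arguments are positional to match the signature: the layer, an inputs dict, the chunk size, and the number of batch dims:

import torch

def layer(x: torch.Tensor) -> torch.Tensor:
    # Stand-in for an expensive per-element module.
    return x * 2 + 1

x = torch.randn(4, 128, 32)  # two batch dims (4, 128), feature dim 32
out = chunk_layer(layer, {"x": x}, 64, 2)  # process 64 flat elements at a time
assert out.shape == (4, 128, 32)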
| 78 | '''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
__snake_case : Any = _symbol_database.Default()
__snake_case : Dict = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
__snake_case : Union[str, Any] = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
__snake_case : Any = None
__snake_case : Dict = B'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
__snake_case : Union[str, Any] = 45
__snake_case : str = 15_81
__snake_case : Optional[int] = 15_17
__snake_case : Optional[Any] = 15_70
__snake_case : Union[str, Any] = 15_84
__snake_case : Any = 17_93
__snake_case : Optional[int] = 17_95
__snake_case : Tuple = 19_16
__snake_case : int = 18_64
__snake_case : Any = 19_05
__snake_case : Optional[int] = 19_19
__snake_case : str = 24_29
__snake_case : Tuple = 22_08
__snake_case : str = 24_18
__snake_case : Tuple = 23_23
__snake_case : Optional[int] = 24_07
# @@protoc_insertion_point(module_scope)
| 660 | 0 |
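A hedged sketch of inspecting an existing SentencePiece model with the generated module above, assuming it is importable as sentencepiece_model_pb2; the file path is a placeholder:

import sentencepiece_model_pb2

m = sentencepiece_model_pb2.ModelProto()
with open("tokenizer.model", "rb") as f:
    m.ParseFromString(f.read())
print(m.trainer_spec.model_type, len(m.pieces), m.pieces[0].piece)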
from collections import deque
from .hash_table import HashTable
class UpperCAmelCase_ ( __lowerCamelCase ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase__ : List[Any] = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_lowerCAmelCase )
UpperCAmelCase__ : List[str] = self.values[key]
def __UpperCAmelCase ( self ):
return (
sum(self.charge_factor - len(_lowerCAmelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(_lowerCAmelCase ) == 0
):
return key
return super()._collision_resolution(_lowerCAmelCase , _lowerCAmelCase )
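A hedged usage sketch of the chained hash table above (HashTableWithLinkedList in the original TheAlgorithms source; the class appears here under its obfuscated name). The constructor arguments and insert_data come from its HashTable base and are assumed:

table = UpperCAmelCase_(size_table=5, charge_factor=2)  # obfuscated class name from above
for value in (10, 20, 30, 40, 50):
    table.insert_data(value)  # insert_data is inherited from HashTable (assumed)
print(table.values)  # each occupied slot holds a deque of chained values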
| 79 | '''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase_ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("xlm-roberta-base" )
UpperCAmelCase_ = "The dog is cute and lives in the garden house"
UpperCAmelCase_ = jnp.array([tokenizer.encode(UpperCamelCase__ )] )
UpperCAmelCase_ = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
UpperCAmelCase_ = model(UpperCamelCase__ )["last_hidden_state"]
self.assertEqual(output.shape , UpperCamelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase__ , atol=1e-3 ) )
| 660 | 0 |
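As a follow-up to the slice check above, a hedged sketch of turning the same checkpoint's output into a sentence embedding by mean pooling; the input sentence is arbitrary:

import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxXLMRobertaModel

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")

inputs = tokenizer("The dog is cute and lives in the garden house", return_tensors="np")
hidden = model(**inputs).last_hidden_state  # (1, seq_len, 768)
sentence_vec = jnp.mean(hidden, axis=1)  # simple mean pooling over tokens
print(sentence_vec.shape)  # (1, 768)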