code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class lowerCamelCase ( unittest.TestCase ):
def snake_case__ ( self :Optional[int] , lowercase :Dict ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 2_5_0
SCREAMING_SNAKE_CASE = ids_tensor((batch_size, length) , _lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.ones((batch_size, length) , device=_lowerCamelCase , dtype=torch.float ) / length
return input_ids, scores
def snake_case__ ( self :str ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_tensors(5 )
SCREAMING_SNAKE_CASE = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=1_0 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_tensors(9 )
self.assertFalse(criteria(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_tensors(1_0 )
self.assertTrue(criteria(_lowerCamelCase , _lowerCamelCase ) )
def snake_case__ ( self :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = MaxLengthCriteria(max_length=1_0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_tensors(5 )
self.assertFalse(criteria(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_tensors(9 )
self.assertFalse(criteria(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_tensors(1_0 )
self.assertTrue(criteria(_lowerCamelCase , _lowerCamelCase ) )
def snake_case__ ( self :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_tensors(5 )
self.assertFalse(criteria(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_tensors(9 )
self.assertFalse(criteria(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_tensors(1_0 )
self.assertTrue(criteria(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 1_0 )
def snake_case__ ( self :int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_tensors(5 )
SCREAMING_SNAKE_CASE = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(_lowerCamelCase , _lowerCamelCase ) )
def snake_case__ ( self :Tuple ) -> Union[str, Any]:
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 )
with self.assertWarns(_lowerCamelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 )
SCREAMING_SNAKE_CASE = validate_stopping_criteria(StoppingCriteriaList() , 1_1 )
self.assertEqual(len(_lowerCamelCase ) , 1 ) | 201 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ : Union[str, Any] ={'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : str =[
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__magic_name__ : List[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 664 | 0 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
UpperCamelCase : Optional[Any] = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
UpperCamelCase : Any = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
UpperCamelCase : int = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ):
return float((preds == labels).mean() )
def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
lowerCamelCase__ = simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )
lowerCamelCase__ = float(fa_score(y_true=lowerCamelCase_ , y_pred=lowerCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
lowerCamelCase__ = np.array(lowerCamelCase_ )
lowerCamelCase__ = np.array(lowerCamelCase_ )
lowerCamelCase__ = en_sentvecs.shape[0]
# mean centering
lowerCamelCase__ = en_sentvecs - np.mean(lowerCamelCase_ , axis=0 )
lowerCamelCase__ = in_sentvecs - np.mean(lowerCamelCase_ , axis=0 )
lowerCamelCase__ = cdist(lowerCamelCase_ , lowerCamelCase_ , """cosine""" )
lowerCamelCase__ = np.array(range(lowerCamelCase_ ) )
lowerCamelCase__ = sim.argsort(axis=1 )[:, :10]
lowerCamelCase__ = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCamelCase__ (datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
"""references""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(_lowerCamelCase ,_lowerCamelCase )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(_lowerCamelCase ,_lowerCamelCase )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(_lowerCamelCase ,_lowerCamelCase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
| 50 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ : Optional[Any] ={
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : int =['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Dict =[
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Tuple =[
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
__magic_name__ : int =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 664 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class a ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self , lowerCamelCase_ ) -> Any:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
_a : List[str] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(_lowerCamelCase )
def __UpperCamelCase ( self ) -> List[str]:
_a : List[str] = 'sshleifer/tiny-gpt2'
_a : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_lowerCamelCase , multi_process=_lowerCamelCase , )
_a : List[Any] = TensorFlowBenchmark(_lowerCamelCase )
_a : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self ) -> List[Any]:
_a : Dict = 'sgugger/tiny-distilbert-classification'
_a : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , only_pretrain_model=_lowerCamelCase , )
_a : Optional[int] = TensorFlowBenchmark(_lowerCamelCase )
_a : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self ) -> str:
_a : Any = 'sshleifer/tiny-gpt2'
_a : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
_a : Union[str, Any] = TensorFlowBenchmark(_lowerCamelCase )
_a : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self ) -> Tuple:
_a : Union[str, Any] = 'sshleifer/tiny-gpt2'
_a : Optional[int] = AutoConfig.from_pretrained(_lowerCamelCase )
_a : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_lowerCamelCase , multi_process=_lowerCamelCase , )
_a : Any = TensorFlowBenchmark(_lowerCamelCase , [config] )
_a : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self ) -> str:
_a : List[Any] = 'sshleifer/tiny-gpt2'
_a : Union[str, Any] = AutoConfig.from_pretrained(_lowerCamelCase )
_a : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
_a : Union[str, Any] = TensorFlowBenchmark(_lowerCamelCase , [config] )
_a : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self ) -> str:
_a : Optional[int] = 'sshleifer/tiny-gpt2'
_a : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
_a : Union[str, Any] = TensorFlowBenchmark(_lowerCamelCase )
_a : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCamelCase ( self ) -> Tuple:
_a : int = 'sshleifer/tiny-gpt2'
_a : Dict = AutoConfig.from_pretrained(_lowerCamelCase )
_a : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
_a : Optional[int] = TensorFlowBenchmark(_lowerCamelCase , [config] )
_a : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCamelCase ( self ) -> Any:
_a : Optional[int] = 'patrickvonplaten/t5-tiny-random'
_a : List[Any] = AutoConfig.from_pretrained(_lowerCamelCase )
_a : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
_a : List[Any] = TensorFlowBenchmark(_lowerCamelCase , configs=[config] )
_a : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def __UpperCamelCase ( self ) -> str:
_a : List[Any] = 'sshleifer/tiny-gpt2'
_a : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_lowerCamelCase , multi_process=_lowerCamelCase , )
_a : Any = TensorFlowBenchmark(_lowerCamelCase )
_a : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self ) -> str:
_a : str = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_a : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_lowerCamelCase , save_to_csv=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowerCamelCase , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(_lowerCamelCase , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(_lowerCamelCase , 'env.csv' ) , multi_process=_lowerCamelCase , )
_a : List[Any] = TensorFlowBenchmark(_lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCamelCase , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase , 'env.csv' ) ).exists() )
def __UpperCamelCase ( self ) -> List[Any]:
_a : str = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(lowerCamelCase_ ):
self.assertTrue(hasattr(_lowerCamelCase , 'sequential' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'cumulative' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'current' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_a : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowerCamelCase , 'log.txt' ) , log_print=_lowerCamelCase , trace_memory_line_by_line=_lowerCamelCase , eager_mode=_lowerCamelCase , multi_process=_lowerCamelCase , )
_a : List[Any] = TensorFlowBenchmark(_lowerCamelCase )
_a : Optional[int] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_lowerCamelCase , 'log.txt' ) ).exists() )
| 120 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
__magic_name__ : str ={
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
__magic_name__ : Tuple ={
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def __snake_case ( lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
__magic_name__ = (images / 2 + 0.5).clamp(0 , 1 )
__magic_name__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__magic_name__ = numpy_to_pil(lowerCamelCase_ )
return images
def __snake_case ( lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
if images.ndim == 3:
__magic_name__ = images[None, ...]
__magic_name__ = (images * 255).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
__magic_name__ = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
__magic_name__ = [Image.fromarray(lowerCamelCase_ ) for image in images]
return pil_images
| 664 | 0 |
"""simple docstring"""
class lowerCAmelCase__ :
def __init__( self ):
'''simple docstring'''
A__ = {} # Mapping from char to TrieNode
A__ = False
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
for word in words:
self.insert(_lowerCamelCase )
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = self
for char in word:
if char not in curr.nodes:
A__ = TrieNode()
A__ = curr.nodes[char]
A__ = True
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = self
for char in word:
if char not in curr.nodes:
return False
A__ = curr.nodes[char]
return curr.is_leaf
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
def _delete(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> bool:
if index == len(_lowerCamelCase ):
# If word does not exist
if not curr.is_leaf:
return False
A__ = False
return len(curr.nodes ) == 0
A__ = word[index]
A__ = curr.nodes.get(_lowerCamelCase )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
A__ = _delete(_lowerCamelCase , _lowerCamelCase , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , _lowerCamelCase , 0 )
def __a ( A , A ) -> List[str]:
'''simple docstring'''
if node.is_leaf:
print(lowerCamelCase_ , end=" " )
for key, value in node.nodes.items():
print_words(lowerCamelCase_ , word + key )
def __a ( ) -> List[str]:
'''simple docstring'''
A__ = "banana bananas bandana band apple all beast".split()
A__ = TrieNode()
root.insert_many(lowerCamelCase_ )
# print_words(root, "")
assert all(root.find(lowerCamelCase_ ) for word in words )
assert root.find("banana" )
assert not root.find("bandanas" )
assert not root.find("apps" )
assert root.find("apple" )
assert root.find("all" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def __a ( A , A ) -> int:
'''simple docstring'''
print(str(lowerCamelCase_ ) , "works!" if passes else "doesn't work :(" )
def __a ( ) -> List[Any]:
'''simple docstring'''
assert test_trie()
def __a ( ) -> Any:
'''simple docstring'''
print_results("Testing trie functionality" , test_trie() )
if __name__ == "__main__":
main() | 337 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__magic_name__ : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(
A , r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class UpperCamelCase_ ( A ):
"""simple docstring"""
def __A ( self : Any , _lowerCamelCase : GenericTensor ) -> np.ndarray:
if self.framework == "tf":
__magic_name__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__magic_name__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCamelCase )
else:
raise ValueError("Unsupported framework" )
return masked_index
def __A ( self : str , _lowerCamelCase : GenericTensor ) -> np.ndarray:
__magic_name__ = self.get_masked_index(_lowerCamelCase )
__magic_name__ = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
def __A ( self : int , _lowerCamelCase : GenericTensor ) -> Any:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_lowerCamelCase )
def __A ( self : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Any=None , **_lowerCamelCase : List[str] ) -> Dict[str, GenericTensor]:
if return_tensors is None:
__magic_name__ = self.framework
__magic_name__ = self.tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase )
self.ensure_exactly_one_mask_token(_lowerCamelCase )
return model_inputs
def __A ( self : List[str] , _lowerCamelCase : int ) -> List[Any]:
__magic_name__ = self.model(**_lowerCamelCase )
__magic_name__ = model_inputs["input_ids"]
return model_outputs
def __A ( self : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any]=5 , _lowerCamelCase : Dict=None ) -> Dict:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__magic_name__ = target_ids.shape[0]
__magic_name__ = model_outputs["input_ids"][0]
__magic_name__ = model_outputs["logits"]
if self.framework == "tf":
__magic_name__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__magic_name__ = outputs.numpy()
__magic_name__ = outputs[0, masked_index, :]
__magic_name__ = stable_softmax(_lowerCamelCase , axis=-1 )
if target_ids is not None:
__magic_name__ = tf.gather_nd(tf.squeeze(_lowerCamelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
__magic_name__ = tf.expand_dims(_lowerCamelCase , 0 )
__magic_name__ = tf.math.top_k(_lowerCamelCase , k=_lowerCamelCase )
__magic_name__ , __magic_name__ = topk.values.numpy(), topk.indices.numpy()
else:
__magic_name__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCamelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__magic_name__ = outputs[0, masked_index, :]
__magic_name__ = logits.softmax(dim=-1 )
if target_ids is not None:
__magic_name__ = probs[..., target_ids]
__magic_name__ , __magic_name__ = probs.topk(_lowerCamelCase )
__magic_name__ = []
__magic_name__ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__magic_name__ = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__magic_name__ = input_ids.numpy().copy()
if target_ids is not None:
__magic_name__ = target_ids[p].tolist()
__magic_name__ = p
# Filter padding out:
__magic_name__ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__magic_name__ = self.tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
__magic_name__ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(_lowerCamelCase )
result.append(_lowerCamelCase )
if single_mask:
return result[0]
return result
def __A ( self : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any]=None ) -> List[str]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__magic_name__ = [targets]
try:
__magic_name__ = self.tokenizer.get_vocab()
except Exception:
__magic_name__ = {}
__magic_name__ = []
for target in targets:
__magic_name__ = vocab.get(_lowerCamelCase , _lowerCamelCase )
if id_ is None:
__magic_name__ = self.tokenizer(
_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , max_length=1 , truncation=_lowerCamelCase , )["input_ids"]
if len(_lowerCamelCase ) == 0:
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
"We cannot replace it with anything meaningful, ignoring it" )
continue
__magic_name__ = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
__magic_name__ = list(set(_lowerCamelCase ) )
if len(_lowerCamelCase ) == 0:
raise ValueError("At least one target must be provided when passed." )
__magic_name__ = np.array(_lowerCamelCase )
return target_ids
def __A ( self : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : int=None ) -> Tuple:
__magic_name__ = {}
if targets is not None:
__magic_name__ = self.get_target_ids(_lowerCamelCase , _lowerCamelCase )
__magic_name__ = target_ids
if top_k is not None:
__magic_name__ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__( self : int , _lowerCamelCase : Any , *_lowerCamelCase : str , **_lowerCamelCase : int ) -> Optional[int]:
__magic_name__ = super().__call__(_lowerCamelCase , **_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) == 1:
return outputs[0]
return outputs
| 664 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule -> public names map consumed by _LazyModule below. The original
# bound this dict (and the torch-only list) to throwaway names, so the
# `_import_structure` reference at the bottom raised NameError and the lazy
# module was never installed.
_import_structure = {
    'configuration_time_series_transformer': [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TimeSeriesTransformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling classes as well.
    _import_structure['modeling_time_series_transformer'] = [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimeSeriesTransformerForPrediction',
        'TimeSeriesTransformerModel',
        'TimeSeriesTransformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 311 |
'''simple docstring'''
from __future__ import annotations


def __snake_case(array: list[int], k: int) -> int:
    """Return the maximum sum of any ``k`` consecutive elements of *array*.

    Sliding-window algorithm, O(n). Raises ValueError when k < 0 or
    k > len(array). (The original declared two parameters with the same
    name — a SyntaxError — and referenced unbound locals.)
    """
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # Slide the window one step: drop array[i], add array[i + k].
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-10_00, 10_00) for i in range(1_00)]
    k = randint(0, 1_10)
    print(f"The maximum sum of {k} consecutive elements is {__snake_case(array, k)}")
| 664 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __lowercase(key, default=False):
    """Read boolean env var *key*; return *default* when it is unset.

    Raises ValueError when the variable is set but not parseable by
    strtobool. (The original declared both parameters with the same name.)
    """
    try:
        # KEY is set: convert it to a truthy int via strtobool.
        _value = strtobool(os.environ[key])
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    except ValueError:
        # More values are supported, but let's keep the message simple.
        raise ValueError(F"""If set, {key} must be yes or no.""")
    return _value
# Run-mode flags read once at import; the skip decorators below consult them.
# (Originally these called an undefined `parse_flag_from_env` and all bound to
# the same throwaway name, so the later `_run_*_tests` reads raised NameError.)
_run_slow_tests = __lowercase('RUN_SLOW', default=False)
_run_remote_tests = __lowercase('RUN_REMOTE', default=False)
_run_local_tests = __lowercase('RUN_LOCAL', default=True)
_run_packaged_tests = __lowercase('RUN_PACKAGED', default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason='test requires sndfile>=0.12.1: \'pip install "soundfile>=0.12.1"\'; ',
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
    reason='test requires apache-beam and a compatible dill version',
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('0.3.2'),
    reason='test requires dill>0.3.2 for cloudpickle compatibility',
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == 'win32',
    reason='test should not be run on Windows',
)
def __lowercase(test_case):
    """Skip *test_case* unless faiss is importable (param name fixed)."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case
def __lowercase(test_case):
    """Skip *test_case* unless the `regex` package is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case
def __lowercase(test_case):
    """Skip *test_case* unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case
def __lowercase(test_case):
    """Skip *test_case* unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case
def __lowercase(test_case):
    """Skip *test_case* unless datasets' config reports PyTorch available."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case
def __lowercase(test_case):
    """Skip *test_case* unless TensorFlow is available per datasets' config."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case
def __lowercase(test_case):
    """Skip *test_case* unless JAX is available per datasets' config."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case
def __lowercase(test_case):
    """Skip *test_case* unless Pillow is available per datasets' config."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def __lowercase(test_case):
    """Skip *test_case* unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case
def __lowercase(test_case):
    """Skip *test_case* unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case
def __lowercase(test_case):
    """Skip *test_case* unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case
def __lowercase(model):
    """Decorator factory: skip the test unless spacy and spacy *model* load."""
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            # spacy itself imports but the requested model isn't installed.
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model
def __lowercase(test_case):
    """Skip *test_case* unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case
def __lowercase(test_case):
    """Skip *test_case* unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def __lowercase(test_case):
    """Skip *test_case* unless RUN_SLOW was enabled (see _run_slow_tests)."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case
def __lowercase(test_case):
    """Skip *test_case* unless RUN_LOCAL was enabled (see _run_local_tests)."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case
def __lowercase(test_case):
    """Skip *test_case* unless RUN_PACKAGED was enabled (see _run_packaged_tests)."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case
def __lowercase(test_case):
    """Skip *test_case* unless RUN_REMOTE was enabled (see _run_remote_tests)."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def __lowercase( *UpperCAmelCase__ ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(lowerCamelCase_ ) and name.startswith("test" ):
for decorator in decorators:
lowerCamelCase = decorator(lowerCamelCase_ )
setattr(cls , lowerCamelCase_ , lowerCamelCase_ )
return cls
return decorate
class lowerCamelCase__ ( UpperCAmelCase_):
    """simple docstring"""

    # NOTE(review): empty marker exception raised by timeout_request() further
    # down when an offline-mode request has no timeout set. The base-class name
    # `UpperCAmelCase_` is not defined anywhere in this file — it reads like it
    # should be a ConnectionError subclass; confirm before running.
    pass
class lowerCamelCase__ ( UpperCAmelCase_):
    """simple docstring"""

    # NOTE(review): this looks like an offline-simulation-mode enum, but all
    # three members are bound to the same name `_A`, so only the last value (2)
    # survives, and the `OfflineSimulationMode.CONNECTION_FAILS` /
    # `CONNECTION_TIMES_OUT` / `HF_DATASETS_OFFLINE_SET_TO_1` references below
    # cannot resolve against it. Base `UpperCAmelCase_` is also undefined —
    # presumably enum.Enum. Confirm intended member names before use.
    _A = 0
    _A = 1
    _A = 2
@contextmanager
def __lowercase(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment inside the `with` block.

    Depending on *mode*, HTTP sends fail immediately, time out, or
    `datasets.config.HF_DATASETS_OFFLINE` is patched on. (The original
    declared both parameters with the same name and never bound the locals
    its nested functions read.)
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""")
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", F"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def __lowercase(*args, **kwargs):
    """Run the `with` body inside a fresh temporary directory.

    Chdirs into the temp dir on entry and always restores the previous
    working directory on exit. (The original chdir'ed to an unbound name
    both ways.)
    """
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def __lowercase():
    """Assert that Arrow allocated memory grows inside the `with` block.

    (The original stored the baseline in a throwaway name and then read the
    unbound `previous_allocated_memory`.)
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __lowercase():
    """Assert that Arrow allocated memory does NOT grow inside the block.

    (Same unbound-baseline fix as the companion context manager above it.)
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __lowercase(rng1, rng2):
    """Return True when two RNGs produce identical integer streams.

    Deep-copies both so neither caller's generator state is consumed.
    (The original declared both parameters with the same name.)
    """
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def __lowercase(func):
    """Wrap *func* so that HTTP 500/502 responses xfail instead of failing."""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(patched_func, *args, **kwargs):
        try:
            return patched_func(*args, **kwargs)
        except HTTPError as err:
            # Transient server-side errors should not fail the test suite.
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class lowerCamelCase__:
    """Value object holding a subprocess result: returncode, stdout, stderr.

    (The original __init__ declared all three parameters with the same
    name — a SyntaxError.)
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def __lowercase(stream, callback):
    """Forward each line read from *stream* to *callback* until EOF.

    (The original declared both parameters with the same name.)
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            # Empty read marks end-of-stream.
            break
async def __lowercase( UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=False , UpperCAmelCase__=False ):
    """simple docstring"""
    # NOTE(review): left byte-identical. The duplicated parameter names are a
    # SyntaxError, and the body refers to `cmd`/`echo`/`p`/`quiet`, to a
    # `_read_stream` helper, and to a `_RunOutput` class, none of which resolve
    # under the mangled names this file actually defines. Restoring it requires
    # renaming the sibling helpers first, so only documentation is added here.
    # Intended behavior (from structure): spawn `cmd` asynchronously, tee its
    # stdout/stderr line-by-line into two sink lists (optionally echoing), wait
    # for exit, and return a result object (returncode, stdout, stderr).
    if echo:
        print("\nRunning: " , " ".join(lowerCamelCase_ ) )
    lowerCamelCase = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=lowerCamelCase_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase_ , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    lowerCamelCase = []
    lowerCamelCase = []
    def tee(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__="" ):
        lowerCamelCase = line.decode("utf-8" ).rstrip()
        sink.append(lowerCamelCase_ )
        if not quiet:
            print(lowerCamelCase_ , lowerCamelCase_ , file=lowerCamelCase_ )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda UpperCAmelCase__ : tee(lowerCamelCase_ , lowerCamelCase_ , sys.stdout , label="stdout:" ) ),
            _read_stream(p.stderr , lambda UpperCAmelCase__ : tee(lowerCamelCase_ , lowerCamelCase_ , sys.stderr , label="stderr:" ) ),
        ] , timeout=lowerCamelCase_ , )
    return _RunOutput(await p.wait() , lowerCamelCase_ , lowerCamelCase_ )
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=180 , UpperCAmelCase__=False , UpperCAmelCase__=True ):
    """simple docstring"""
    # NOTE(review): left byte-identical. Duplicated parameter names are a
    # SyntaxError; the body calls `_stream_subprocess` and reads
    # `cmd`/`result`/`cmd_str`/`stderr`, none of which resolve under this
    # file's mangled names, so a standalone rewrite cannot be made coherent.
    # Intended behavior (from structure): synchronously drive the async
    # subprocess runner above, raise RuntimeError on nonzero exit or when the
    # child produced no output at all, otherwise return the run result.
    lowerCamelCase = asyncio.get_event_loop()
    lowerCamelCase = loop.run_until_complete(
        _stream_subprocess(lowerCamelCase_ , env=lowerCamelCase_ , stdin=lowerCamelCase_ , timeout=lowerCamelCase_ , quiet=lowerCamelCase_ , echo=lowerCamelCase_ ) )
    lowerCamelCase = " ".join(lowerCamelCase_ )
    if result.returncode > 0:
        lowerCamelCase = "\n".join(result.stderr )
        raise RuntimeError(
            F"""\'{cmd_str}\' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""" )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"""\'{cmd_str}\' produced no output.""" )
    return result
def __lowercase():
    """Return this pytest-xdist worker's numeric index (0 when not under xdist)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    # Worker ids look like "gw3" — strip the prefix and parse the index.
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)
def __lowercase():
    """Return a torch.distributed port unique per pytest-xdist worker.

    Base port 29500 plus the worker index. The worker-id lookup is inlined
    because the helper it originally called no longer resolves by name in
    this module.
    """
    port = 29500
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    uniq_delta = int(re.sub(r"^gw", "", worker, 0, re.M))
    return port + uniq_delta
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# Module logger. The original bound both the logger and the archive map to the
# same throwaway name, so the logger was immediately overwritten by the dict.
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class UpperCamelCase_(A):
    """LLaMA model configuration (mirrors transformers.LlamaConfig).

    NOTE(review): the base class `A` is not defined in this file — presumably
    the PretrainedConfig imported above; confirm. The two class attributes
    were both bound to the same name originally, so only the second survived.
    """

    model_type = 'llama'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=3_20_00,
        hidden_size=40_96,
        intermediate_size=1_10_08,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=20_48,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate `rope_scaling`: a {"type": ..., "factor": ...} dict or None."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f'got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}')

    # Preserve the original (mangled) public method name as an alias so any
    # external caller of `__A` keeps working.
    __A = _rope_scaling_validation
| 664 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Builds tiny RegNet configs and inputs for the Flax model tests below.

    Method names are restored to the names the sibling test class actually
    calls (prepare_config_and_inputs, get_config, ...); the originals were all
    bound to the same `_lowercase` identifier and shadowed one another.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=3_2,
        num_channels=3,
        embeddings_size=1_0,
        hidden_sizes=[1_0, 2_0, 3_0, 4_0],  # deliberate shared default, read-only
        depths=[1, 1, 2, 1],  # deliberate shared default, read-only
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2), )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE__(FlaxModelTesterMixin, unittest.TestCase):
    """Flax RegNet model tests.

    Test methods are restored to unittest-discoverable ``test_*`` names (the
    originals were all bound to `_lowercase` and shadowed one another), and
    the mixin base is the FlaxModelTesterMixin imported above.
    """

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # +1 for the initial embedding output.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def SCREAMING_SNAKE_CASE():
    """Load the standard COCO cats test-fixture image.

    (The original assigned the image to a throwaway name and returned the
    unbound `image`.)
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Slow integration test for the pretrained Flax RegNet classifier.

    The cached property is restored to the `default_image_processor` name the
    test body reads, and the test method to a discoverable ``test_*`` name.
    """

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1_0_0_0)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 297 |
'''simple docstring'''
__magic_name__ : Dict =8.3_1_4_4_6_2 # Unit - J mol-1 K-1
def __snake_case ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def __snake_case(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal-gas volume: V = nRT / P (SI units).

    Raises ValueError for negative inputs; gas constant inlined for the same
    reason as the pressure helper (the module constant's name is rebound
    later in this file). Duplicate parameter names fixed.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * 8.314462 / pressure
if __name__ == "__main__":
    # Execute the module doctests when run as a script.
    import doctest

    doctest.testmod()
| 664 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __lowercase(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
):
    """Simulated annealing over *search_prob* (a SearchProblem-like object).

    Returns the best state seen. (The original declared many parameters with
    duplicate names — a SyntaxError — and its locals were unbound.)
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('Iterations')
        plt.ylabel('Function values')
        plt.show()
    return best_state
if __name__ == "__main__":
    # Demo driver. (Originally the objective functions shadowed the annealer's
    # own name and `prob`/`local_min` were never bound — every call below
    # raised NameError.)

    def test_fa(x, y):
        """Paraboloid objective used to exercise the annealer."""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = __lowercase(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = __lowercase(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    def test_fa(x, y):  # noqa: F811 — deliberately rebinds the objective
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = __lowercase(prob, find_max=False, visualization=True)
    print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        F"""{local_min.score()}"""
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = __lowercase(prob, find_max=True, visualization=True)
    print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        F"""{local_min.score()}"""
    )
| 417 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
# Module logger; the write_predictions path below calls `logger.warning`, but
# the original bound this to a throwaway name, leaving `logger` undefined.
logger = logging.getLogger(__name__)
class UpperCamelCase_(A):
    """CoNLL-style NER task: reads space-separated token/label files.

    NOTE(review): base `A` is undefined here — presumably the imported
    TokenClassificationTask; confirm. The three methods below all share the
    mangled name `__A` (read examples / write predictions / get labels), so
    only the last binding is reachable; the names are kept to preserve the
    external interface.
    """

    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def __A(self, data_dir, mode):
        """Read `{mode}.txt` from *data_dir* into InputExample objects."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
        return examples

    def __A(self, writer, test_input_reader, preds_list):
        """Write predictions alongside tokens read from *test_input_reader*."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def __A(self, path):
        """Return labels from *path* (ensuring 'O') or the CoNLL-2003 defaults."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase_(A):
    """Chunking task variant: labels live in the second-to-last CoNLL column.

    NOTE(review): base `A` is undefined here — presumably the NER task class
    defined just above (this class only overrides the label column and the
    label set); confirm.
    """

    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def __A(self, path):
        """Return chunk labels from *path* (ensuring 'O') or the defaults."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class UpperCamelCase_(A):
    """POS-tagging task reading CoNLL-U files via `conllu.parse_incr`.

    NOTE(review): base `A` is undefined — presumably TokenClassificationTask;
    confirm. The three methods share the mangled name `__A` (read examples /
    write predictions / get labels); names kept to preserve the interface.
    """

    def __A(self, data_dir, mode):
        """Read `{mode}.txt` (CoNLL-U) from *data_dir* into InputExamples."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                    guid_index += 1
        return examples

    def __A(self, writer, test_input_reader, preds_list):
        """Write `form (upos|prediction)` lines per sentence."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def __A(self, path):
        """Return UPOS labels from *path* or the universal defaults."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 664 | 0 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
# No delimiter: sentences are concatenated directly before being split into chars.
SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        """Flatten a list of sentences into one list of characters (pre-2.3.0 jiwer API)."""

        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                # Insert the delimiter between sentences, never after the last one.
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    # jiwer >= 2.3.0 ships the equivalent transforms natively.
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
# Docstrings consumed by the `add_start_docstrings` decorator on the metric class below.
_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcribtions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a_(datasets.Metric):
    """Character Error Rate metric backed by jiwer.

    Character-level transforms are applied to both truth and hypothesis, so
    jiwer's word-level "wer" measure computed on the transformed input is the CER.
    """

    def _info(self):
        # `datasets.Metric` subclasses must implement `_info` and `_compute`.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        """Return CER over the corpus; optionally score all texts as one sequence."""
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
| 384 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    """A dense row x column matrix of numbers.

    Supports (row, col) tuple indexing, addition, negation, subtraction,
    scalar and matrix multiplication, transposition, and the Sherman-Morrison
    rank-one inverse update.
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a `row` x `column` matrix with every entry set to `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f'Matrix consist of {self.row} rows and {self.column} columns\n'
        # Width of the widest element, so columns line up.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'%{max_element_length}s'

        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Return True when `loc` is a 2-tuple/list inside the matrix bounds."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        if not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'Unsupported type given for another ({type(another)})'
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        """Return a new matrix with rows and columns swapped."""
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """Return (A + u v^T)^(-1) given that `self` is A^(-1).

        `u` and `v` must be column vectors matching A's dimension.
        Returns None when the update is singular (1 + v^T A^(-1) u == 0).
        """
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demonstrate the Sherman-Morrison update on the 3x3 identity."""
        # a^(-1) is the identity, which is its own inverse.
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'a^(-1) is {ainv}')
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'u is {u}')
        print(f'v is {v}')
        print(f'uv^T is {u * v.transpose()}')
        # Sherman Morrison
        print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}')

    def test2() -> None:
        """Run the module's doctests."""
        import doctest

        doctest.testmod()

    test1()
| 664 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy import structure: heavy backends (torch / tf) are only imported on first use.
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy at runtime.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 169 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Sample text with non-ASCII characters, used to sanity-check tokenization.
SAMPLE_TEXT = "Hello world! cécé herlolip"

# Configuration record matching the authors' BertAbs implementation.
BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)
def convert_bertabs_checkpoints(path_to_checkpoints: str, dump_path: str) -> None:
    """Convert the authors' BertAbs checkpoint into a `transformers` BertAbsSummarizer.

    Loads both implementations, copies the weights across, and verifies that
    the two models produce identical outputs before saving the state dict.

    Args:
        path_to_checkpoints: path to the official PyTorch dump.
        dump_path: output folder for the converted model (NOTE(review): the save
            path below is hard-coded and ignores this argument — confirm upstream).
    """
    # Instantiate the authors' model with the pre-trained weights.
    # NOTE(review): boolean flags reconstructed from the reference script — confirm.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs (padded to the encoder's 512-token max length)
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = mask_tgt = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, clss, mask_cls)[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()

    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 664 | 0 |
from functools import reduce
# Project Euler problem 8: the 1000-digit number, as one long string.
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen consecutive digits in `n`.

    Each window's product is folded with `reduce`, carrying the running
    product as a string so the same lambda works over the digit characters.
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f'{solution() = }')
| 461 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase_(unittest.TestCase):
    """Unit tests for `DisjunctiveConstraint`."""

    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        constraint_sets = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(constraint_sets)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        constraint_sets = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(constraint_sets)  # fails here

    def test_example_progression(self):
        constraint_sets = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(constraint_sets)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        constraint_sets = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(constraint_sets)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 664 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__lowerCAmelCase = logging.get_logger(__name__)
@dataclass
class lowerCamelCase(BenchmarkArguments):
    """Benchmark arguments specific to the TensorFlow backend."""

    # Deprecated negative flags that map to positive counterparts (no_X -> X).
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated `no_*` kwargs into their positive forms, then init the base."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        """Resolve the TPU cluster once; None when no TPU is reachable."""
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        """Build the distribution strategy for TPU, single GPU, or CPU."""
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
# One non-default value per common `PretrainedConfig` kwarg; used below to check
# that every base-config attribute is covered and that no value equals its default.
config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}
@is_staging_test
class UpperCamelCase_(unittest.TestCase):
    """Round-trip tests for pushing configs to the Hugging Face Hub (staging)."""

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup: the repos may not exist if a test failed early.
        for repo_id in ("test-config", "valid_org/test-config-org", "test-dynamic-config"):
            try:
                delete_repo(token=cls._token, repo_id=repo_id)
            except HTTPError:
                pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config', trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class UpperCamelCase_ ( unittest.TestCase ):
    """Utility tests for ``PretrainedConfig`` behavior that need no Hub login:
    string-based updates, common-kwargs bookkeeping, subfolder loading, offline
    fallback to the cache, URL loading (deprecated), and version-gated config
    file selection.

    NOTE(review): identifiers throughout this class look machine-scrambled —
    values are assigned to ``__magic_name__`` but read back under their original
    names (``c``, ``configuration``, ``new_configuration``, ...), and
    ``_lowerCamelCase`` appears where no such name is bound. Compare with the
    upstream ``test_configuration_common.py`` before trusting runtime behavior.
    """
    def __A ( self : Optional[int] ) -> Optional[Any]:
        """``update_from_string`` must round-trip int/float/bool/str fields."""
        __magic_name__ = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        __magic_name__ = c.n_embd + 1 # int
        __magic_name__ = c.resid_pdrop + 1.0 # float
        __magic_name__ = not c.scale_attn_weights # bool
        __magic_name__ = c.summary_type + "foo" # str
        c.update_from_string(
            f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
        self.assertEqual(_lowerCamelCase , c.n_embd , "mismatch for key: n_embd" )
        self.assertEqual(_lowerCamelCase , c.resid_pdrop , "mismatch for key: resid_pdrop" )
        self.assertEqual(_lowerCamelCase , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
        self.assertEqual(_lowerCamelCase , c.summary_type , "mismatch for key: summary_type" )
    def __A ( self : List[Any] ) -> Union[str, Any]:
        """Guard: every base-config attribute must be covered by
        ``config_common_kwargs`` except the four known exemptions, and none of
        the common kwargs may equal its default (otherwise the test is inert)."""
        __magic_name__ = PretrainedConfig()
        __magic_name__ = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to addin config_common_kwargs above.
        self.assertListEqual(
            _lowerCamelCase , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
        __magic_name__ = [key for key, value in config_common_kwargs.items() if value == getattr(_lowerCamelCase , _lowerCamelCase )]
        if len(_lowerCamelCase ) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f' {", ".join(_lowerCamelCase )}.' )
    def __A ( self : List[Any] ) -> List[Any]:
        """Loading a config stored in a repo subfolder requires ``subfolder=``."""
        with self.assertRaises(_lowerCamelCase ):
            # config is in subfolder, the following should not work without specifying the subfolder
            __magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
        __magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
        self.assertIsNotNone(_lowerCamelCase )
    def __A ( self : Tuple ) -> int:
        """With the server mocked to return HTTP 500, loading must fall back to
        the local cache (after a prior successful download)."""
        # A mock response for an HTTP head request to emulate server down
        __magic_name__ = mock.Mock()
        __magic_name__ = 5_00
        __magic_name__ = {}
        __magic_name__ = HTTPError
        __magic_name__ = {}
        # Download this model to make sure it's in the cache.
        __magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=_lowerCamelCase ) as mock_head:
            __magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def __A ( self : Union[str, Any] ) -> Dict:
        """Direct-URL config loading still works (deprecated, removable in v5)."""
        # This test is for deprecated behavior and can be removed in v5
        __magic_name__ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
    def __A ( self : Dict ) -> Optional[int]:
        """Version-gated config files: ``config.4.0.0.json`` is selected on
        Transformers > 4.0.0, while a ``config.42.0.0.json`` gate is skipped
        because the running version is older."""
        __magic_name__ = AutoConfig.from_pretrained("bert-base-cased" )
        __magic_name__ = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(_lowerCamelCase )
            __magic_name__ = 2
            json.dump(configuration.to_dict() , open(os.path.join(_lowerCamelCase , "config.4.0.0.json" ) , "w" ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            __magic_name__ = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            __magic_name__ = ["config.42.0.0.json"]
            __magic_name__ = 7_68
            configuration.save_pretrained(_lowerCamelCase )
            shutil.move(os.path.join(_lowerCamelCase , "config.4.0.0.json" ) , os.path.join(_lowerCamelCase , "config.42.0.0.json" ) )
            __magic_name__ = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertEqual(new_configuration.hidden_size , 7_68 )
    def __A ( self : Optional[int] ) -> str:
        """A repo with two config files: the file matching the (monkey-patched)
        library version must be chosen, and ``_configuration_file`` must not
        leak into the returned unused kwargs."""
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        __magic_name__ = "hf-internal-testing/test-two-configs"
        import transformers as new_transformers
        __magic_name__ = "v4.0.0"
        __magic_name__ , __magic_name__ = new_transformers.models.auto.AutoConfig.from_pretrained(
            _lowerCamelCase , return_unused_kwargs=_lowerCamelCase )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` ia not kept in the kwargs by mistake.
        self.assertDictEqual(_lowerCamelCase , {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        __magic_name__ = "v3.0.0"
        __magic_name__ = old_transformers.models.auto.AutoConfig.from_pretrained(_lowerCamelCase )
        self.assertEqual(old_configuration.hidden_size , 7_68 )
| 664 | 0 |
"""Lazy import structure for the Blenderbot model package: configuration,
tokenizers, and the PyTorch / TensorFlow / Flax model classes.

Fixes vs. the previous revision: the import structure was assigned to
throwaway variables instead of a shared ``_import_structure`` dict (which was
then passed to ``_LazyModule`` undefined), and the module was never actually
replaced by the lazy proxy in ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Symbols importable regardless of which optional backends are installed.
_import_structure = {
    'configuration_blenderbot': [
        'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlenderbotConfig',
        'BlenderbotOnnxConfig',
    ],
    'tokenization_blenderbot': ['BlenderbotTokenizer'],
}

# Each optional backend contributes its symbols only when available; a missing
# backend is skipped silently so `import transformers.models.blenderbot` works
# in any environment.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_blenderbot'] = [
        'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlenderbotForCausalLM',
        'BlenderbotForConditionalGeneration',
        'BlenderbotModel',
        'BlenderbotPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
        'TFBlenderbotForConditionalGeneration',
        'TFBlenderbotModel',
        'TFBlenderbotPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
        'FlaxBlenderbotForConditionalGeneration',
        'FlaxBlenderbotModel',
        'FlaxBlenderbotPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers and IDEs see the real imports.
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports each
    # submodule only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCamelCase_ ( unittest.TestCase ):
    """Holds the knobs used to build a DPT image processor in tests and
    produces the kwargs dict the processor is instantiated from.

    Fixes vs. the previous revision: ``__init__`` repeated the placeholder
    parameter name ``_lowerCamelCase`` for every argument — a SyntaxError —
    while the body read names that were never bound; the parameter names are
    restored from the attribute reads. The kwargs getter is named
    ``prepare_image_processor_dict`` so the companion test class, which calls
    that method, resolves it.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Same inline fallback as the original: a fixed 18x18 target size.
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.do_normalize = do_normalize
        # None sentinels avoid mutable default arguments; effective defaults
        # are the original per-channel 0.5 values.
        self.image_mean = [0.5, 0.5, 0.5] if image_mean is None else image_mean
        self.image_std = [0.5, 0.5, 0.5] if image_std is None else image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build a ``DPTImageProcessor``."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class UpperCamelCase_ ( A , unittest.TestCase ):
    """Tests for ``DPTImageProcessor``: presence of the standard properties,
    ``from_dict`` size handling, and batched/unbatched pixel-value shapes for
    PIL, NumPy and PyTorch inputs.

    NOTE(review): identifiers look machine-scrambled — results are bound to
    ``__magic_name__`` but read back as ``image_processing``/``image_inputs``/
    ``encoded_images``, and ``_lowerCamelCase`` is unbound in these methods.
    Compare with the upstream DPT image-processing test before running.
    """
    UpperCAmelCase__ : Union[str, Any] = DPTImageProcessor if is_vision_available() else None
    def __A ( self : Dict ) -> Any:
        # setUp: build the kwargs helper (called DPTImageProcessingTester upstream).
        __magic_name__ = DPTImageProcessingTester(self )
    @property
    def __A ( self : str ) -> str:
        # Convenience accessor for the processor kwargs dict.
        return self.image_processor_tester.prepare_image_processor_dict()
    def __A ( self : Tuple ) -> List[str]:
        # The processor must expose every standard configuration attribute.
        __magic_name__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCamelCase , "image_mean" ) )
        self.assertTrue(hasattr(_lowerCamelCase , "image_std" ) )
        self.assertTrue(hasattr(_lowerCamelCase , "do_normalize" ) )
        self.assertTrue(hasattr(_lowerCamelCase , "do_resize" ) )
        self.assertTrue(hasattr(_lowerCamelCase , "size" ) )
    def __A ( self : List[str] ) -> List[Any]:
        # from_dict: default size plus override through the `size` kwarg.
        __magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        __magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
    def __A ( self : Union[str, Any] ) -> List[str]:
        # PIL input: unbatched call yields (1, C, H, W); batched yields (B, C, H, W).
        # Initialize image_processing
        __magic_name__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , Image.Image )
        # Test not batched input
        __magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        __magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def __A ( self : Dict ) -> Optional[Any]:
        # NumPy input: same shape expectations as the PIL case.
        # Initialize image_processing
        __magic_name__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , np.ndarray )
        # Test not batched input
        __magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        __magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def __A ( self : Optional[int] ) -> Dict:
        # Torch-tensor input: same shape expectations as the PIL case.
        # Initialize image_processing
        __magic_name__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , torch.Tensor )
        # Test not batched input
        __magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        __magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
| 664 | 0 |
"""Lazy import structure for the LayoutXLM package: processor plus the slow
and fast tokenizers (each gated on its optional backend).

Fixes vs. the previous revision: the import structure was assigned to
throwaway variables instead of a shared ``_import_structure`` dict (which was
then passed to ``_LazyModule`` undefined), and the module was never actually
replaced by the lazy proxy in ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# The processor is importable regardless of optional backends.
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}

# Slow tokenizer needs sentencepiece; fast tokenizer needs tokenizers.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']

if TYPE_CHECKING:
    # Static type checkers and IDEs see the real imports.
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports each
    # submodule only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 120 |
'''simple docstring'''
import numpy
class UpperCamelCase_ :
    """A fully-connected network with two hidden layers (4 and 3 sigmoid
    nodes) and one output node, trained with hand-written backpropagation.

    NOTE(review): this class looks machine-scrambled — ``__init__`` repeats the
    parameter name ``_lowerCamelCase`` (a SyntaxError), and every assignment
    targets ``__magic_name__`` while later code reads the original attribute
    names (``self.input_array``, ``self.predicted_output``, the three weight
    matrices, ...). It also calls free functions ``sigmoid`` /
    ``sigmoid_derivative`` that are defined below under a scrambled name.
    Restore from the upstream two-hidden-layer network before running.
    """
    def __init__( self : Union[str, Any] , _lowerCamelCase : numpy.ndarray , _lowerCamelCase : numpy.ndarray ) -> None:
        # Parameters (upstream): input_array, output_array.
        __magic_name__ = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        __magic_name__ = numpy.random.rand(
            self.input_array.shape[1] , 4 )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        __magic_name__ = numpy.random.rand(
            4 , 3 )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        __magic_name__ = numpy.random.rand(3 , 1 )
        # Real output values provided.
        __magic_name__ = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        __magic_name__ = numpy.zeros(output_array.shape )
    def __A ( self : int ) -> numpy.ndarray:
        # Forward pass: input -> hidden1 -> hidden2 -> output, sigmoid at each step.
        __magic_name__ = sigmoid(
            numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        __magic_name__ = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        __magic_name__ = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return self.layer_between_second_hidden_layer_and_output
    def __A ( self : Dict ) -> None:
        # Backpropagation: gradients of the squared error w.r.t. each weight
        # matrix (chain rule through the sigmoid activations), then a plain
        # gradient-ascent-style in-place update of the three weight matrices.
        __magic_name__ = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output ) , )
        __magic_name__ = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        __magic_name__ = numpy.dot(
            self.input_array.T , numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def __A ( self : Optional[int] , _lowerCamelCase : numpy.ndarray , _lowerCamelCase : int , _lowerCamelCase : bool ) -> None:
        # train (upstream parameters: output, iterations, give_loss): run
        # forward + backprop `iterations` times, optionally printing MSE loss.
        for iteration in range(1 , iterations + 1 ):
            __magic_name__ = self.feedforward()
            self.back_propagation()
            if give_loss:
                __magic_name__ = numpy.mean(numpy.square(output - self.feedforward() ) )
                print(f'Iteration {iteration} Loss: {loss}' )
    def __A ( self : Tuple , _lowerCamelCase : numpy.ndarray ) -> int:
        # predict (upstream parameter: input_arr): one forward pass over a
        # single example, thresholded at 0.6 to a 0/1 class label.
        __magic_name__ = input_arr
        __magic_name__ = sigmoid(
            numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        __magic_name__ = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        __magic_name__ = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def __snake_case ( lowerCamelCase_ : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def __snake_case ( lowerCamelCase_ : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
def __snake_case ( ):
    """Train the two-hidden-layer network on the 3-input odd-parity truth
    table and return its prediction for the input (1, 1, 1).

    NOTE(review): scrambled identifiers — arrays are bound to ``__magic_name__``
    but passed on as ``lowerCamelCase_``, the class is referenced as
    ``TwoHiddenLayerNeuralNetwork`` (defined above under a scrambled name), and
    ``numpy.floataa`` is presumably a mangled ``numpy.float64`` — verify
    against the upstream example before running.
    """
    # 8 rows: every 3-bit input combination.
    __magic_name__ = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) , dtype=numpy.floataa , )
    # True output values for the given input values.
    __magic_name__ = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
    # Calling neural network class.
    __magic_name__ = TwoHiddenLayerNeuralNetwork(
        input_array=lowerCamelCase_ , output_array=lowerCamelCase_ )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=lowerCamelCase_ , iterations=10 , give_loss=lowerCamelCase_ )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
# NOTE(review): `example` is not defined in this module (the function above has
# a scrambled name) — this guard raises NameError as written.
if __name__ == "__main__":
    example()
| 664 | 0 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
    """Tokenizer tests for BARThez (moussaKam/mbarthez): vocab layout,
    batching, slow/fast parity, and a pinned integration encoding.

    Fix vs. the previous revision: stray table residue (`` | 337 |``) fused to
    the last line of the class made the whole module a SyntaxError; it has
    been removed. NOTE(review): several identifiers look machine-scrambled —
    values are bound to ``A__`` but read back under their original names
    (``tokenizer``, ``batch``, ``vocab_keys``, ...), and ``_lowerCamelCase``
    is unbound in these methods; compare with the upstream BARThez test.
    """
    lowercase__ : Any = BarthezTokenizer
    lowercase__ : Dict = BarthezTokenizerFast
    lowercase__ : List[Any] = True
    lowercase__ : List[str] = True
    def lowercase_ ( self ):
        '''Download the fast tokenizer once and save it (both formats) into the
        test's temp dir so the mixin can reload it locally.'''
        super().setUp()
        A__ = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=_lowerCamelCase )
        A__ = tokenizer
    def lowercase_ ( self ):
        '''The pad token must map to id 1 and back.'''
        A__ = "<pad>"
        A__ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase )
    def lowercase_ ( self ):
        '''Vocab layout: <s>, <pad> first, <mask> last, 101122 entries total.'''
        A__ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(_lowerCamelCase ) , 10_11_22 )
    def lowercase_ ( self ):
        '''vocab_size must agree with the vocab dict length above.'''
        self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
    @require_torch
    def lowercase_ ( self ):
        '''Batched encoding to PyTorch tensors: shapes and pinned first row.'''
        A__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        A__ = [0, 57, 30_18, 7_03_07, 91, 2]
        A__ = self.tokenizer(
            _lowerCamelCase , max_length=len(_lowerCamelCase ) , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors="pt" )
        self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        A__ = batch.input_ids.tolist()[0]
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
    def lowercase_ ( self ):
        '''Slow and fast tokenizers must agree on tokens and ids.'''
        if not self.test_rust_tokenizer:
            return
        A__ = self.get_tokenizer()
        A__ = self.get_rust_tokenizer()
        A__ = "I was born in 92000, and this is falsé."
        A__ = tokenizer.tokenize(_lowerCamelCase )
        A__ = rust_tokenizer.tokenize(_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
        A__ = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
        A__ = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
        A__ = self.get_rust_tokenizer()
        A__ = tokenizer.encode(_lowerCamelCase )
        A__ = rust_tokenizer.encode(_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
    @slow
    def lowercase_ ( self ):
        '''Integration test against a pinned revision and expected encoding.'''
        A__ = {"input_ids": [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        A__ = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=_lowerCamelCase , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=_lowerCamelCase , )
'''simple docstring'''
import torch
from transformers import AutoModel
class UpperCamelCase_ ( torch.nn.Module ):
    """Few-shot NER span scorer: a BERT encoder plus a similarity head that
    scores, for each query, the probability of each token being a span start
    or span end given a batch of support examples.

    NOTE(review): identifiers look machine-scrambled — submodules are assigned
    to ``__magic_name__`` but read back as ``self.bert``/``self.cos``/
    ``self.softmax``, several methods repeat the parameter name
    ``_lowerCamelCase`` (a SyntaxError), and ``self.BERT`` refers to the
    encoder helper (defined here under the scrambled name ``__A``). Compare
    with the upstream FSNER example before running.
    """
    def __init__( self : Any , _lowerCamelCase : Optional[int]="sayef/fsner-bert-base-uncased" ) -> List[Any]:
        super(_lowerCamelCase , self ).__init__()
        # Encoder plus the similarity/normalization heads (upstream attribute
        # names: bert, cos, softmax).
        __magic_name__ = AutoModel.from_pretrained(_lowerCamelCase , return_dict=_lowerCamelCase )
        __magic_name__ = torch.nn.CosineSimilarity(3 , 1e-08 )
        __magic_name__ = torch.nn.Softmax(dim=1 )
    def __A ( self : Tuple , **_lowerCamelCase : Union[str, Any] ) -> Optional[int]:
        # Encoder helper (upstream name: BERT): last hidden states for a batch.
        return self.bert(**_lowerCamelCase ).last_hidden_state
    def __A ( self : Dict , _lowerCamelCase : Dict ) -> Dict:
        # Pool token embeddings by summing the hidden dimension.
        return token_embeddings.sum(2 , keepdim=_lowerCamelCase )
    def __A ( self : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Tuple=1 ) -> Optional[Any]:
        # Temperature-scaled softmax over cosine similarities.
        return self.softmax(T * self.cos(_lowerCamelCase , _lowerCamelCase ) )
    def __A ( self : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] ) -> List[str]:
        # forward: encode queries and supports, then for each query slice out
        # its support block and score start/end token probabilities.
        __magic_name__ = W_supports["sizes"].tolist()
        __magic_name__ = W_supports["start_token_id"].item()
        __magic_name__ = W_supports["end_token_id"].item()
        # Bookkeeping keys are consumed here, not passed to the encoder.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        __magic_name__ = self.BERT(**_lowerCamelCase )
        __magic_name__ = self.BERT(**_lowerCamelCase )
        __magic_name__ = None
        __magic_name__ = None
        # Boolean masks locating the special start/end marker tokens.
        __magic_name__ = W_supports["input_ids"] == start_token_id
        __magic_name__ = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(_lowerCamelCase ):
            if i == 0:
                __magic_name__ = 0
            else:
                __magic_name__ = support_sizes[i - 1]
            # Marker-token embeddings within this query's support slice.
            __magic_name__ = S[s : s + size][start_token_masks[s : s + size]]
            __magic_name__ = S[s : s + size][end_token_masks[s : s + size]]
            # Similarity of query tokens to start/end markers, normalized.
            __magic_name__ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            __magic_name__ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                __magic_name__ = torch.vstack((p_starts, p_start) )
                __magic_name__ = torch.vstack((p_ends, p_end) )
            else:
                __magic_name__ = p_start
                __magic_name__ = p_end
        return p_starts, p_ends
| 664 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# NOTE(review): these four module constants look machine-scrambled — all are
# assigned to the same name (each overwriting the last) and the `Dict`/`Tuple`
# annotations reference names this module never imports. Upstream they are
# LABEL_DIR, IMG_DIR, OUTPUT_DIR and FLIP_TYPE — verify before running.
SCREAMING_SNAKE_CASE__ : Dict = ''
SCREAMING_SNAKE_CASE__ : List[Any] = ''
SCREAMING_SNAKE_CASE__ : Dict = ''
SCREAMING_SNAKE_CASE__ : Tuple = 1 # (0 is vertical, 1 is horizontal)
def A ( ) -> Dict:
    """Entry point: load the dataset, flip every image and its annotations,
    and write each flipped image (.jpg) and label file (.txt) with a random
    suffix.

    NOTE(review): machine-scrambled — the annotated tuple assignments
    (``lowerCamelCase , lowerCamelCase : ... = ...``) are SyntaxErrors, the
    helper results are read back under names never bound (``paths``,
    ``new_annos``, ``file_name``, ...), and the output paths
    ``f'/{file_root}.jpg'`` write to the filesystem root instead of under
    OUTPUT_DIR. Compare with the upstream flip-augmentation script.
    """
    lowerCamelCase , lowerCamelCase : Optional[int] = get_dataset(lowerCamelCase_ ,lowerCamelCase_ )
    print("Processing..." )
    lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = update_image_and_anno(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
    for index, image in enumerate(lowerCamelCase_ ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        lowerCamelCase : Dict = random_chars(32 )
        lowerCamelCase : Optional[int] = paths[index].split(os.sep )[-1].rsplit("." ,1 )[0]
        lowerCamelCase : List[Any] = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cva.imwrite(f'''/{file_root}.jpg''' ,lowerCamelCase_ ,[cva.IMWRITE_JPEG_QUALITY, 85] )
        print(f'''Success {index+1}/{len(lowerCamelCase_ )} with {file_name}''' )
        lowerCamelCase : Optional[int] = []
        for anno in new_annos[index]:
            lowerCamelCase : Any = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(lowerCamelCase_ )
        with open(f'''/{file_root}.txt''' ,"w" ) as outfile:
            outfile.write("\n".join(line for line in annos_list ) )
def A(label_dir, img_dir):
    """Pair YOLO label files with their images.

    Args:
        label_dir: directory containing ``*.txt`` label files.
        img_dir: directory containing the matching ``<name>.jpg`` images.

    Returns:
        Tuple ``(img_paths, labels)`` where ``labels[i]`` is a list of
        ``[class, x_center, y_center, width, height]`` boxes for
        ``img_paths[i]``. Label files with no boxes are skipped entirely.

    Fixes: the original signature repeated one placeholder parameter name
    (a SyntaxError), the body read names that were never bound, and the
    ``-> Tuple`` annotation referenced a name this module never imports.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        # Label file stem doubles as the image stem.
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            # One box per line: "class x_center y_center width height".
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def A(img_list, anno_list, flip_type=1):
    """Flip every image and its YOLO-style annotations.

    Args:
        img_list: paths of the images to flip.
        anno_list: per-image list of ``[class, x_center, y_center, w, h]`` boxes.
        flip_type: 1 flips horizontally, 0 vertically (cv2.flip convention).

    Returns:
        Tuple ``(flipped_images, flipped_annotation_lists, original_paths)``.

    Fixes: the original signature repeated one placeholder parameter name
    (a SyntaxError) and the body read names that were never bound.
    """
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            # Horizontal flip mirrors each box's x-center (coords are in [0, 1]).
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            # Vertical flip mirrors each box's y-center.
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        # NOTE(review): as in the original structure, a flip_type outside {0, 1}
        # leaves new_img unbound and raises NameError here — confirm callers
        # only ever pass 0 or 1.
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def A(_SCREAMING_SNAKE_CASE=32):
    """Return a random string of lowercase letters and digits of the given
    length (default 32), e.g. a file-name suffix.

    The parameter keeps its original placeholder name so keyword callers are
    unaffected; the body now actually uses it — previously it read two
    undefined names (``number_char``, ``lowerCamelCase_``).
    """
    # NOTE(review): `assert` disappears under -O; raising ValueError would be
    # sturdier, but would change the exception type callers may catch.
    assert _SCREAMING_SNAKE_CASE > 1, "The number of character should greater than 1"
    alphabet = ascii_lowercase + digits
    return "".join(random.choice(alphabet) for _ in range(_SCREAMING_SNAKE_CASE))
# NOTE(review): `main` is not defined in this module — the entry point above
# carries the scrambled name `A` (shadowed by the later helpers of the same
# name), so this guard raises NameError as written; verify the intended entry
# point against the upstream script.
if __name__ == "__main__":
    main()
    print('DONE ✅')
| 311 |
'''Deprecated re-export shim for ``FlaxStableDiffusionControlNetPipeline``.'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning on import, pointing callers at the new location;
# stacklevel=3 attributes the warning to the importing module, not this shim.
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
| 664 | 0 |
from __future__ import annotations
from typing import Any
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
if not postfix_notation:
return 0
lowerCamelCase = {"+", "-", "*", "/"}
lowerCamelCase = []
for token in postfix_notation:
if token in operations:
lowerCamelCase , lowerCamelCase = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(lowerCamelCase_ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod() | 623 |
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def __snake_case ( lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ):
    """Convert a T5X (t5 / longt5) checkpoint into a Flax seq2seq model and save it.

    NOTE(review): the obfuscation destroyed this function badly — the three
    parameters share one name (a SyntaxError) and every assignment target was
    replaced by `__magic_name__`, so the Flax parameter-tree writes no longer
    land anywhere.  Presumably the arguments were (tax_checkpoint_path,
    config_name, flax_dump_folder_path) — confirm against the argparse block
    below.  Left byte-identical except for documentation.
    """
    __magic_name__ = AutoConfig.from_pretrained(lowerCamelCase_ )
    __magic_name__ = FlaxAutoModelForSeqaSeqLM.from_config(config=lowerCamelCase_ )
    __magic_name__ = checkpoints.load_tax_checkpoint(lowerCamelCase_ )
    # v1.1 checkpoints split the MLP input projection into wi_0 / wi_1 (gated GELU).
    __magic_name__ = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
    # NOTE(review): for model_type == "t5" the second chain below still falls into
    # `else` and raises — looks like the first `if` should feed an `elif` chain.
    if config.model_type == "t5":
        __magic_name__ = "SelfAttention"
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        __magic_name__ = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        __magic_name__ = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]." )
    # Encoder
    for layer_index in range(config.num_layers ):
        __magic_name__ = F'layers_{str(lowerCamelCase_ )}'
        # Self-Attention
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
        # Layer Normalization
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
        if split_mlp_wi:
            __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        __magic_name__ = flax_model.params["encoder"]["block"][str(lowerCamelCase_ )]["layer"]
        __magic_name__ = tax_attention_key
        __magic_name__ = tax_attention_out
        __magic_name__ = tax_attention_query
        __magic_name__ = tax_attention_value
        __magic_name__ = tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            __magic_name__ = tax_global_layer_norm
        if split_mlp_wi:
            __magic_name__ = tax_mlp_wi_a
            __magic_name__ = tax_mlp_wi_a
        else:
            __magic_name__ = tax_mlp_wi
        __magic_name__ = tax_mlp_wo
        __magic_name__ = tax_mlp_layer_norm
        __magic_name__ = flax_model_encoder_layer_block
    # Only for layer 0:
    __magic_name__ = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    __magic_name__ = tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        __magic_name__ = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        __magic_name__ = tax_encoder_global_rel_embedding
    # Assigning
    __magic_name__ = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    __magic_name__ = tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers ):
        __magic_name__ = F'layers_{str(lowerCamelCase_ )}'
        # Self-Attention
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
        # Layer Normalization
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]
        # Encoder-Decoder-Attention
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        __magic_name__ = tax_enc_dec_attention_module["key"]["kernel"]
        __magic_name__ = tax_enc_dec_attention_module["out"]["kernel"]
        __magic_name__ = tax_enc_dec_attention_module["query"]["kernel"]
        __magic_name__ = tax_enc_dec_attention_module["value"]["kernel"]
        # Layer Normalization
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
        # MLP
        if split_mlp_wi:
            __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        __magic_name__ = flax_model.params["decoder"]["block"][str(lowerCamelCase_ )]["layer"]
        __magic_name__ = tax_attention_key
        __magic_name__ = tax_attention_out
        __magic_name__ = tax_attention_query
        __magic_name__ = tax_attention_value
        __magic_name__ = tax_pre_attention_layer_norm
        __magic_name__ = tax_enc_dec_attention_key
        __magic_name__ = tax_enc_dec_attention_out
        __magic_name__ = tax_enc_dec_attention_query
        __magic_name__ = tax_enc_dec_attention_value
        __magic_name__ = tax_cross_layer_norm
        if split_mlp_wi:
            __magic_name__ = tax_mlp_wi_a
            __magic_name__ = tax_mlp_wi_a
        else:
            __magic_name__ = tax_mlp_wi
        __magic_name__ = tax_mlp_wo
        __magic_name__ = txa_mlp_layer_norm
        __magic_name__ = flax_model_decoder_layer_block
    # Decoder Normalization
    __magic_name__ = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    __magic_name__ = txa_decoder_norm
    # Only for layer 0:
    __magic_name__ = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    __magic_name__ = tax_decoder_rel_embedding
    # Token Embeddings
    __magic_name__ = tax_model["target"]["token_embedder"]["embedding"]
    __magic_name__ = txa_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        __magic_name__ = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(lowerCamelCase_ )
    print("T5X Model was sucessfully converted!" )
# CLI entry point: parse the three required paths/names and run the conversion.
if __name__ == "__main__":
    __magic_name__ : Optional[Any] =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
    )
    parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
    parser.add_argument(
        '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
    )
    # NOTE(review): the parser/args variable names were lost to obfuscation
    # (`__magic_name__`); `parser` / `args` below are therefore unresolved here.
    __magic_name__ : Optional[int] =parser.parse_args()
    convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 664 | 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
    """Tokenizer test-suite for google/mobilebert-uncased (slow + fast tokenizers).

    Exercises basic tokenization, wordpiece splitting, whitespace/control/punctuation
    predicates, offset mapping and Chinese-character handling.
    NOTE(review): obfuscation replaced many call arguments with `_lowerCamelCase`;
    the intended literal values (booleans, strings) must be recovered from the
    upstream repository before this file can run.
    """

    # Tokenizer classes under test and mixin configuration flags.
    a_ = MobileBertTokenizer
    a_ = MobileBertTokenizerFast
    a_ = True
    a_ = True
    a_ = filter_non_english
    a_ = '''google/mobilebert-uncased'''

    def _lowercase ( self : Optional[int] ):
        # Write a tiny wordpiece vocab to a temp dir so tests don't hit the network.
        super().setUp()
        snake_case__ : Any = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        snake_case__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        snake_case__ : Optional[Any] = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def _lowercase ( self : List[Any] , __A : Optional[int] ):
        # Provide an (input, expected-output) pair for the generic mixin tests.
        snake_case__ : Optional[Any] = "UNwant\u00E9d,running"
        snake_case__ : Union[str, Any] = "unwanted, running"
        return input_text, output_text

    def _lowercase ( self : str ):
        # Full tokenization against the tiny vocab written in setUp.
        snake_case__ : Union[str, Any] = self.tokenizer_class(self.vocab_file )
        snake_case__ : Union[str, Any] = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(_lowerCamelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] )

    def _lowercase ( self : Optional[Any] ):
        # Slow and fast tokenizers must agree, with and without lower casing.
        if not self.test_rust_tokenizer:
            return
        snake_case__ : Tuple = self.get_tokenizer()
        snake_case__ : Dict = self.get_rust_tokenizer()
        snake_case__ : Any = "UNwant\u00E9d,running"
        snake_case__ : Union[str, Any] = tokenizer.tokenize(_lowerCamelCase )
        snake_case__ : Tuple = rust_tokenizer.tokenize(_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
        snake_case__ : Any = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
        snake_case__ : Any = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
        snake_case__ : List[Any] = self.get_rust_tokenizer()
        snake_case__ : Dict = tokenizer.encode(_lowerCamelCase )
        snake_case__ : Dict = rust_tokenizer.encode(_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
        # With lower casing
        snake_case__ : Optional[Any] = self.get_tokenizer(do_lower_case=_lowerCamelCase )
        snake_case__ : Optional[int] = self.get_rust_tokenizer(do_lower_case=_lowerCamelCase )
        snake_case__ : List[Any] = "UNwant\u00E9d,running"
        snake_case__ : List[Any] = tokenizer.tokenize(_lowerCamelCase )
        snake_case__ : Dict = rust_tokenizer.tokenize(_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
        snake_case__ : List[str] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
        snake_case__ : Tuple = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
        snake_case__ : List[Any] = self.get_rust_tokenizer()
        snake_case__ : Any = tokenizer.encode(_lowerCamelCase )
        snake_case__ : Optional[int] = rust_tokenizer.encode(_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )

    def _lowercase ( self : List[str] ):
        # BasicTokenizer keeps CJK characters as individual tokens.
        snake_case__ : List[Any] = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )

    def _lowercase ( self : Any ):
        snake_case__ : Optional[Any] = BasicTokenizer(do_lower_case=_lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  " ) , ["hello", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def _lowercase ( self : Any ):
        snake_case__ : Union[str, Any] = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["hällo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )

    def _lowercase ( self : Tuple ):
        snake_case__ : Optional[int] = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def _lowercase ( self : Union[str, Any] ):
        snake_case__ : Optional[Any] = BasicTokenizer(do_lower_case=_lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def _lowercase ( self : Tuple ):
        snake_case__ : Union[str, Any] = BasicTokenizer(do_lower_case=_lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )

    def _lowercase ( self : Any ):
        snake_case__ : Dict = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )

    def _lowercase ( self : Tuple ):
        snake_case__ : List[str] = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )

    def _lowercase ( self : List[str] ):
        # `never_split` must keep special tokens intact.
        snake_case__ : int = BasicTokenizer(do_lower_case=_lowerCamelCase , never_split=["[UNK]"] )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )

    def _lowercase ( self : Tuple ):
        # WordpieceTokenizer with an in-memory vocab: greedy longest-match-first.
        snake_case__ : List[Any] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        snake_case__ : Dict = {}
        for i, token in enumerate(_lowerCamelCase ):
            snake_case__ : List[str] = i
        snake_case__ : Dict = WordpieceTokenizer(vocab=_lowerCamelCase , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
        self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )

    def _lowercase ( self : str ):
        self.assertTrue(_is_whitespace(" " ) )
        self.assertTrue(_is_whitespace("\t" ) )
        self.assertTrue(_is_whitespace("\r" ) )
        self.assertTrue(_is_whitespace("\n" ) )
        self.assertTrue(_is_whitespace("\u00A0" ) )
        self.assertFalse(_is_whitespace("A" ) )
        self.assertFalse(_is_whitespace("-" ) )

    def _lowercase ( self : Union[str, Any] ):
        self.assertTrue(_is_control("\u0005" ) )
        self.assertFalse(_is_control("A" ) )
        self.assertFalse(_is_control(" " ) )
        self.assertFalse(_is_control("\t" ) )
        self.assertFalse(_is_control("\r" ) )

    def _lowercase ( self : int ):
        self.assertTrue(_is_punctuation("-" ) )
        self.assertTrue(_is_punctuation("$" ) )
        self.assertTrue(_is_punctuation("`" ) )
        self.assertTrue(_is_punctuation("." ) )
        self.assertFalse(_is_punctuation("A" ) )
        self.assertFalse(_is_punctuation(" " ) )

    def _lowercase ( self : str ):
        snake_case__ : int = self.get_tokenizer()
        snake_case__ : Tuple = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(_lowerCamelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(_lowerCamelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )

    @slow
    def _lowercase ( self : int ):
        # Special-token layout for single sequences and sequence pairs.
        snake_case__ : Tuple = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
        snake_case__ : str = tokenizer.encode("sequence builders" , add_special_tokens=_lowerCamelCase )
        snake_case__ : Any = tokenizer.encode("multi-sequence build" , add_special_tokens=_lowerCamelCase )
        snake_case__ : List[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
        snake_case__ : Dict = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
        assert encoded_sentence == [1_0_1] + text + [1_0_2]
        assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]

    def _lowercase ( self : Optional[int] ):
        # Offset mapping must line up with the produced tokens (cased and uncased).
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
                snake_case__ : str = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                snake_case__ : List[str] = tokenizer_r.encode_plus(
                    _lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase , )
                snake_case__ : Optional[int] = tokenizer_r.do_lower_case if hasattr(_lowerCamelCase , "do_lower_case" ) else False
                snake_case__ : int = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), "Allen"),
                        ((2_1, 2_3), "##NL"),
                        ((2_3, 2_4), "##P"),
                        ((2_5, 3_3), "sentence"),
                        ((3_3, 3_4), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), "allen"),
                        ((2_1, 2_3), "##nl"),
                        ((2_3, 2_4), "##p"),
                        ((2_5, 3_3), "sentence"),
                        ((3_3, 3_4), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )

    def _lowercase ( self : Tuple ):
        # Chinese characters: "##" prefix behaviour with tokenize_chinese_chars on/off.
        snake_case__ : int = ["的", "人", "有"]
        snake_case__ : str = "".join(_lowerCamelCase )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                snake_case__ : Dict = True
                snake_case__ : Union[str, Any] = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
                snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
                snake_case__ : List[str] = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
                snake_case__ : Any = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
                snake_case__ : Tuple = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
                snake_case__ : Tuple = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
                self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
                snake_case__ : Optional[int] = False
                snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
                snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
                snake_case__ : str = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
                snake_case__ : List[Any] = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
                snake_case__ : Tuple = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
                snake_case__ : Any = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
                # it is expected that only the first Chinese character is not preceded by "##".
                snake_case__ : Optional[int] = [
                    f'''##{token}''' if idx != 0 else token for idx, token in enumerate(_lowerCamelCase )
                ]
                self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
                self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
| 297 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCamelCase_ ( unittest.TestCase , A ):
    """Tests for the `text-to-speech` agent tool (SpeechT5-based).

    NOTE(review): obfuscation gave all three methods the same name `__A`, so only
    the last definition survives at class-creation time — the original surely had
    distinct setup/test names; recover them before relying on this suite.
    """

    def __A ( self : Optional[int] ) -> Any:
        # Load and initialise the tool once per test.
        __magic_name__ = load_tool("text-to-speech" )
        self.tool.setup()

    def __A ( self : Union[str, Any] ) -> int:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        __magic_name__ = self.tool("hey" )
        __magic_name__ = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )

    def __A ( self : List[str] ) -> int:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        __magic_name__ = self.tool("hey" )
        __magic_name__ = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 664 | 0 |
def __lowercase ( lowerCamelCase : int ):
if num <= 0:
raise ValueError('Input must be a positive integer' )
UpperCamelCase_ : int = [True] * (num + 1)
UpperCamelCase_ : str = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , lowerCamelCase_ ):
UpperCamelCase_ : Optional[int] = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
# Script entry point: run doctests, then prompt the user for a bound and print
# the primes up to it.  NOTE(review): `prime_sieve_eratosthenes` is not defined
# under that name in this (obfuscated) file — confirm against the upstream source.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    a_ = int(input('Enter a positive integer: ').strip())
    print(prime_sieve_eratosthenes(user_num))
| 417 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Module constants for the deduplication pipeline.  NOTE(review): the constant
# names were clobbered by obfuscation; from their uses below these were
# presumably NON_ALPHA (token-split regex), MIN_NUM_TOKENS and NUM_PERM.
__magic_name__ : Dict =re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
__magic_name__ : int =10
__magic_name__ : Union[str, Any] =2_56
def __snake_case ( lowerCamelCase_ : List[str] ):
    """Compute the MinHash signature of a document's token list.

    Args:
        lowerCamelCase_: list of string tokens for one document.

    Returns:
        A ``MinHash`` over the unique tokens, or ``None`` when the document is
        shorter than ``MIN_NUM_TOKENS`` (too small to deduplicate reliably).
    """
    if len(lowerCamelCase_ ) < MIN_NUM_TOKENS:
        return None
    # Bug fix: the original passed the token list itself as `num_perm`;
    # the permutation count must be the module constant NUM_PERM.
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(lowerCamelCase_ ):
        min_hash.update(token.encode() )
    return min_hash
def __snake_case ( lowerCamelCase_ : str ):
    """Split *lowerCamelCase_* on non-alphanumeric characters (NON_ALPHA regex)
    and return the set of non-empty pieces."""
    pieces = NON_ALPHA.split(lowerCamelCase_ )
    return {piece for piece in pieces if piece.strip()}
class UpperCamelCase_ :
    """MinHash-LSH index that groups near-duplicate documents into clusters.

    NOTE(review): obfuscation broke this class — `add` has two parameters with
    the same name (a SyntaxError) and the bodies reference names
    (`duplication_jaccard_threshold`, `code_key`, `close_duplicates`, `base`,
    `duplicates`, `duplicate_clusters`) whose assignments were clobbered to
    `__magic_name__`.  Left byte-identical except for documentation.
    """

    def __init__( self : int , *,
        _lowerCamelCase : float = 0.85 , ) -> Optional[Any]:
        # Jaccard threshold, LSH index and base->duplicates mapping
        # (presumably a defaultdict(set), given the .add() calls below).
        __magic_name__ = duplication_jaccard_threshold
        __magic_name__ = NUM_PERM
        __magic_name__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        __magic_name__ = defaultdict(_lowerCamelCase )

    def __A ( self : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : MinHash ) -> None:
        # Query the index for near-duplicates, insert the new key, then attach
        # it to an existing cluster (or start one at the first close duplicate).
        __magic_name__ = self._index.query(_lowerCamelCase )
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}' )
            return
        self._index.insert(_lowerCamelCase , _lowerCamelCase )
        if len(_lowerCamelCase ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(_lowerCamelCase )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(_lowerCamelCase )

    def __A ( self : Union[str, Any] ) -> List[List[Dict]]:
        # Materialise each cluster (base + duplicates) as a list of dicts.
        __magic_name__ = []
        for base, duplicates in self._duplicate_clusters.items():
            __magic_name__ = [base] + list(_lowerCamelCase )
            # reformat the cluster to be a list of dict
            __magic_name__ = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(_lowerCamelCase )
        return duplicate_clusters

    def __A ( self : Tuple , _lowerCamelCase : Tuple ) -> None:
        # Dump the clusters to a JSON file.
        __magic_name__ = self.get_duplicate_clusters()
        with open(_lowerCamelCase , "w" ) as f:
            json.dump(_lowerCamelCase , _lowerCamelCase )
def __snake_case ( lowerCamelCase_ : List[Any] ):
    """Worker: compute the MinHash of one ``(index, row)`` pair from a dataset.

    Args:
        lowerCamelCase_: tuple of (row index, row dict with "content",
            "repo_name" and "path" keys).

    Returns:
        ``((index, repo_name, path), min_hash)`` or ``None`` for rows too short
        to hash (``get_min_hash`` returned ``None``).
    """
    # Bug fix: the obfuscated original collapsed the unpack targets, leaving
    # `index`, `data` and `min_hash` undefined.
    index, data = lowerCamelCase_
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def __snake_case ( lowerCamelCase_ : Type[Dataset] ):
    """Yield MinHash results for every row of *lowerCamelCase_* (an iterable of
    (index, row) pairs), computed in parallel with a process pool.

    Rows for which ``_compute_min_hash`` returned ``None`` are skipped.
    """
    with mp.Pool() as pool:
        # ThreadedIterator prefetches up to 10000 items so the pool never starves.
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(lowerCamelCase_ , max_queue_size=1_0000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def __snake_case ( dataset : Type[Dataset] , jaccard_threshold : float ):
    """Index every row of *dataset* and return the resulting duplicate clusters.

    Args:
        dataset: dataset whose rows carry "content"/"repo_name"/"path".
        jaccard_threshold: Jaccard similarity above which rows are duplicates.

    Returns:
        ``List[List[Dict]]`` of duplicate clusters from ``DuplicationIndex``.
    """
    # Bug fix: the obfuscated original declared two parameters with the same
    # name (a SyntaxError) and fed `di.add` the same undefined name twice.
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def __snake_case ( code_a : str , code_b : str ):
    """Jaccard similarity of the token sets of two code strings.

    Args:
        code_a: first code string.
        code_b: second code string.

    Returns:
        ``|A ∩ B| / |A ∪ B|`` over the ``get_tokens`` sets of both strings.
        NOTE(review): raises ZeroDivisionError if both token sets are empty —
        matches the original behaviour.
    """
    # Bug fix: the obfuscated original had two parameters with the same name
    # (a SyntaxError) and intersected a token set with itself.
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
__magic_name__ : List[str] =None
def __snake_case ( cluster : Dict , jaccard_threshold : List[Any] ):
    """Reduce one duplicate cluster to its 'extreme' representatives.

    An element joins the extremes only if it is NOT a near-duplicate (Jaccard
    >= *jaccard_threshold*) of an already-selected extreme; otherwise the
    matching extreme's ``"copies"`` counter is incremented.  Reads row content
    from the process-global ``_shared_dataset``.

    Returns:
        List of extreme elements, each annotated with a ``"copies"`` count.
    """
    # Bug fix: the obfuscated original duplicated the parameter names and used
    # the same loop variable `elementa` for both the outer and inner loops.
    extremes = []
    for element in cluster:
        code = _shared_dataset[element["base_index"]]["content"]
        for extreme in extremes:
            extreme_code = _shared_dataset[extreme["base_index"]]["content"]
            if jaccard_similarity(code , extreme_code ) >= jaccard_threshold:
                extreme["copies"] += 1
                break
        else:
            # No existing extreme is close enough: this element becomes one.
            element["copies"] = 1
            extremes.append(element )
    return extremes
def __snake_case ( cluster_list : Dict , dataset : Any , jaccard_threshold : Union[str, Any] ):
    """Compute the extremes of every duplicate cluster, in parallel.

    Publishes *dataset* through the module-global ``_shared_dataset`` so that
    forked pool workers can read row contents without pickling the dataset.

    Args:
        cluster_list: list of duplicate clusters (lists of element dicts).
        dataset: the dataset the clusters index into.
        jaccard_threshold: similarity threshold forwarded to the workers.

    Returns:
        One extremes list per cluster (order not guaranteed — imap_unordered).
    """
    # Bug fix: the obfuscated original duplicated all three parameter names and
    # passed the same undefined name as both the worker and its input iterable.
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    worker = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                worker , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def __snake_case ( lowerCamelCase_ : Type[Dataset] , lowerCamelCase_ : float = 0.85 ):
    """Deduplicate *lowerCamelCase_*: cluster near-duplicates, keep only the
    'extreme' representatives, and filter the rest out of the dataset.

    NOTE(review): obfuscation duplicated the parameter names (a SyntaxError)
    and clobbered the assignment targets to `__magic_name__`, so the names read
    below (`duplicate_clusters`, `duplicate_indices`, `extreme_dict`,
    `remove_indices`, `element`, `ds_filter`) are unresolved here — recover
    them from the upstream repository.  Left byte-identical except for docs.
    """
    __magic_name__ = make_duplicate_clusters(lowerCamelCase_ , lowerCamelCase_ )
    __magic_name__ = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    __magic_name__ = {}
    __magic_name__ = find_extremes(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    for extremes in extremes_clusters:
        for element in extremes:
            __magic_name__ = element
    # Rows that are duplicates but not chosen as extremes get removed.
    __magic_name__ = duplicate_indices - set(extreme_dict.keys() )
    __magic_name__ = dataset.filter(lambda lowerCamelCase_ , lowerCamelCase_ : idx not in remove_indices , with_indices=lowerCamelCase_ )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            __magic_name__ = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                __magic_name__ = extreme_dict[element["base_index"]]["copies"]
    print(F'Original dataset size: {len(lowerCamelCase_ )}' )
    print(F'Number of duplicate clusters: {len(lowerCamelCase_ )}' )
    print(F'Files in duplicate cluster: {len(lowerCamelCase_ )}' )
    print(F'Unique files in duplicate cluster: {len(lowerCamelCase_ )}' )
    print(F'Filtered dataset size: {len(lowerCamelCase_ )}' )
    return ds_filter, duplicate_clusters
| 664 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a_ (_a , unittest.TestCase ):
    """Fast unit tests for ``DiTPipeline`` using tiny, randomly-initialised
    components (Transformer2D + VAE + DDIM scheduler).

    NOTE(review): many boolean/seed arguments were obfuscated to
    `_lowerCamelCase`; recover the literals from the upstream repository.
    """

    # Pipeline under test and the parameter sets the tester mixin checks.
    __lowerCAmelCase : Optional[int] = DiTPipeline
    __lowerCAmelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    __lowerCAmelCase : str = PipelineTesterMixin.required_optional_params - {
        '''latents''',
        '''num_images_per_prompt''',
        '''callback''',
        '''callback_steps''',
    }
    __lowerCAmelCase : List[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    __lowerCAmelCase : Tuple = False

    def __UpperCamelCase ( self ):
        # Build minimal components so a full forward pass stays fast on CPU.
        torch.manual_seed(0 )
        _lowerCAmelCase : Optional[int] = TransformeraDModel(
            sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_0_0_0 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=_lowerCamelCase , )
        _lowerCAmelCase : str = AutoencoderKL()
        _lowerCAmelCase : Union[str, Any] = DDIMScheduler()
        _lowerCAmelCase : List[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
        return components

    def __UpperCamelCase ( self , snake_case_ , snake_case_=0 ):
        # Deterministic inputs; mps needs a CPU generator.
        if str(_lowerCamelCase ).startswith("""mps""" ):
            _lowerCAmelCase : str = torch.manual_seed(_lowerCamelCase )
        else:
            _lowerCAmelCase : Union[str, Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
        _lowerCAmelCase : Tuple = {
            """class_labels""": [1],
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    def __UpperCamelCase ( self ):
        # End-to-end inference: check output shape and a pinned pixel slice.
        _lowerCAmelCase : Optional[int] = """cpu"""
        _lowerCAmelCase : List[Any] = self.get_dummy_components()
        _lowerCAmelCase : Dict = self.pipeline_class(**_lowerCamelCase )
        pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        _lowerCAmelCase : str = self.get_dummy_inputs(_lowerCamelCase )
        _lowerCAmelCase : int = pipe(**_lowerCamelCase ).images
        _lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 1_6, 1_6, 3) )
        _lowerCAmelCase : Tuple = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
        _lowerCAmelCase : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(_lowerCamelCase , 1E-3 )

    def __UpperCamelCase ( self ):
        self._test_inference_batch_single_identical(relax_max_difference=_lowerCamelCase , expected_max_diff=1E-3 )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def __UpperCamelCase ( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class a_ (unittest.TestCase ):
    """Slow GPU integration tests for DiTPipeline: run the real 256/512 DiT-XL-2
    checkpoints and compare the outputs against reference numpy images."""

    def __UpperCamelCase ( self ):
        # Free GPU memory between the heavyweight tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCamelCase ( self ):
        # 256px checkpoint with the default (DDIM) scheduler, 40 steps.
        _lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
        _lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
        pipe.to("""cuda""" )
        _lowerCAmelCase : Optional[Any] = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
        _lowerCAmelCase : Any = pipe.get_label_ids(_lowerCamelCase )
        _lowerCAmelCase : List[Any] = pipe(_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=4_0 , output_type="""np""" ).images
        for word, image in zip(_lowerCamelCase , _lowerCamelCase ):
            _lowerCAmelCase : List[str] = load_numpy(
                f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
            assert np.abs((expected_image - image).max() ) < 1E-2

    def __UpperCamelCase ( self ):
        # 512px checkpoint with DPMSolver++ scheduler, 25 steps (looser tolerance).
        _lowerCAmelCase : Optional[int] = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
        _lowerCAmelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to("""cuda""" )
        _lowerCAmelCase : str = ["""vase""", """umbrella"""]
        _lowerCAmelCase : Optional[int] = pipe.get_label_ids(_lowerCamelCase )
        _lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
        _lowerCAmelCase : Optional[int] = pipe(_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=2_5 , output_type="""np""" ).images
        for word, image in zip(_lowerCamelCase , _lowerCamelCase ):
            _lowerCAmelCase : List[str] = load_numpy(
                """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
                f'/dit/{word}_512.npy' )
            assert np.abs((expected_image - image).max() ) < 1E-1
| 384 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# Hard version pins: the conversion below relies on gluonnlp 0.8.3 / mxnet 1.5.0
# internals (BERTEncoder kwargs, _collect_params_with_prefix).
if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Sentence used at the end of the conversion to sanity-check that the Gluon
# and Transformers models produce the same hidden states.
SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (based on BERT & RoBERTa) to a
    Transformers BERT checkpoint.

    Args:
        bort_checkpoint_path: Path to the official Bort params file.
        pytorch_dump_folder_path: Output folder for the converted model.
    """
    # Original Bort configuration (4 layers, 8 heads, hidden 768, embed 1024).
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter                                                | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta`                                      | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma`                                     | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight`                                      | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight`                                          | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias`     | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight`   | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias`   | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias`   | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias`                   | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight`                 | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta`                  | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma`                 | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias`                   | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight`                 | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta`              | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma`             | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias`                        | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight`                      | `bert.encoder.layer.*.output.dense.weight`

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint/output paths and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 664 | 0 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
# Module-level logger; the pipeline methods below emit warnings through it.
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r'\n        top_k (`int`, defaults to 5):\n            The number of predictions to return.\n        targets (`str` or `List[str]`, *optional*):\n            When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n            token will be used (with a warning, and that might be slower).\n\n    ',
)
class SCREAMING_SNAKE_CASE__(Pipeline):
    """Masked language modeling ("fill-mask") pipeline.

    Predicts the most likely tokens for the tokenizer's mask token in the
    input text, optionally restricted to a set of target words.
    """

    def get_masked_index(self, input_ids: GenericTensor):
        """Return the indices of the mask token(s) in `input_ids`."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        """Raise a PipelineException if `input_ids` contains no mask token."""
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        """Validate every sample in `model_inputs` has a mask token."""
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters):
        """Tokenize `inputs` and check that a mask token is present."""
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model; keep input_ids alongside logits for postprocessing."""
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        """Turn logits into the top-k (score, token, token_str, sequence) dicts."""
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        """Resolve target words to vocabulary ids (tokenizing out-of-vocab words)."""
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        """Split call-time kwargs into (preprocess, forward, postprocess) params."""
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        """Fill the masked token(s); unwrap the result for single-element lists."""
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
| 169 |
'''simple docstring'''
def __snake_case ( lowerCamelCase_ : int , lowerCamelCase_ : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
__magic_name__ = str(bin(lowerCamelCase_ ) )[2:] # remove the leading "0b"
__magic_name__ = str(bin(lowerCamelCase_ ) )[2:] # remove the leading "0b"
__magic_name__ = max(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
return "0b" + "".join(
str(int(char_a == "1" and char_b == "1" ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase_ ) , b_binary.zfill(lowerCamelCase_ ) ) )
# Run the embedded doctests when this module is executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 664 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Pressure (Pa) of an ideal gas: P = nRT / V.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")

    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Volume (m^3) of an ideal gas: V = nRT / P.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")

    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
# Run the embedded doctests when this module is executed directly.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 461 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__magic_name__ : Tuple =threading.Lock()
__magic_name__ : Optional[logging.Handler] =None
__magic_name__ : List[str] ={
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
__magic_name__ : str =logging.WARNING
__magic_name__ : Any =True
def _get_default_logging_level():
    """Return the default log level, honoring the TRANSFORMERS_VERBOSITY env var.

    Falls back to `_default_log_level` (WARNING) when the variable is unset
    or names an unknown level.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level
def __snake_case ( ):
'''simple docstring'''
return __name__.split("." )[0]
def __snake_case ( ):
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def _configure_library_root_logger() -> None:
    """Attach a stderr StreamHandler to the library root logger (idempotent, lock-guarded)."""
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    """Detach the default handler and reset the root logger's level to NOTSET."""
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    """Return the mapping of verbosity names to `logging` levels."""
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the given name, configuring the root logger first.

    Defaults to the library's top-level package name when `name` is None.
    """
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current effective verbosity of the library root logger."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    """Set the library verbosity to INFO."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the library verbosity to WARNING."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the library verbosity to DEBUG."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the library verbosity to ERROR."""
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """Remove the library's default stderr handler from the root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Re-attach the library's default stderr handler to the root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add the given handler to the library root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library root logger."""
    _configure_library_root_logger()

    # NOTE(review): this asserts the handler is *not* attached before removing
    # it, which looks inverted; kept as-is to preserve behavior — confirm intent.
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    """Stop library log records from propagating to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Allow library log records to propagate to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Format every root-logger handler as `[LEVEL|file:line] time >> message`."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset every root-logger handler to the default (unset) formatter."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """Like `Logger.warning`, but a no-op when TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


# Attach as a method on logging.Logger so any library logger can call it.
logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Emit a warning only once per unique (logger, message) combination.

    The lru_cache on the (self, args, kwargs-free) call makes repeats no-ops.
    """
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything (used when progress bars are disabled)."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        # Keep the iterable (tqdm's first positional arg) so iteration still works.
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any tqdm attribute/method (e.g. update)."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    """Callable dispatcher: real `tqdm` when progress bars are enabled, else EmptyTqdm."""

    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
# Module-level tqdm proxy; call it exactly like `tqdm.tqdm(...)`.
tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return whether tqdm progress bars are currently enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars here and in huggingface_hub."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    """Disable tqdm progress bars here and in huggingface_hub."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 664 | 0 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    parts = s.rsplit(old, occurrence)
    return new.join(parts)
def count_parameters(state_dict):
    """Sum all parameter values in `state_dict`, skipping `encoder.embeddings` keys.

    Used as a cheap checksum to compare the original and converted weights.
    """
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    """Rename DALL-E encoder state-dict keys to the FlavaImageCodebook layout.

    - `group_N.` prefixes gain a `.group.` segment,
    - `res_path.` gains a `.path.` segment,
    - trailing `.w` / `.b` become `.weight` / `.bias`,
    and all values are cast to float32.
    """
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Copy/paste/tweak a DALL-E encoder checkpoint into the FlavaImageCodebook design.

    Args:
        checkpoint_path: Local path or URL of the dall_e encoder checkpoint.
        pytorch_dump_folder_path: Where to save the converted model.
        config_path: Optional path to an existing FlavaImageCodebookConfig.
        save_checkpoint: When False, return the converted state dict instead of saving.
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    # The checkpoint may be a whole Encoder module or just its state dict.
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()

    # Parameter-sum checksum: converted weights must match the originals.
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    # CLI entry point for the DALL-E -> FLAVA codebook conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
"""Lazy-import module setup for FocalNet (standard transformers `__init__` pattern)."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only exposed when torch is installed.
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module is used.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664 | 0 |
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf config from `config_path`; optionally pretty-print it."""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a VQModel from a config and load its checkpoint onto `device`.

    Defaults to the local `./model_checkpoints/vqgan_only.{yaml,pt}` files.
    """
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)

    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    # Lightning checkpoints nest the weights under "state_dict".
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    # Free the raw state dict before returning the loaded model.
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    """Encode `x` with the VQGAN, log the latent shape, and decode it back."""
    # could also use model(x) for reconstruction but use explicit encode/decode here
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like "pkg.mod.Class" to the named attribute.

    When `reload` is True, the containing module is re-imported first.
    """
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    """Instantiate `config["target"]` (a dotted class path) with `config["params"]`."""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Instantiate a model from `config`, optionally loading state dict `sd`.

    Returns a {"model": model} dict, with the model moved to CUDA and/or set
    to eval mode as requested.
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def A__ ( config , ckpt , gpu , eval_mode ):
    """Load a model plus its training global step from a Lightning checkpoint.

    Args:
        config: config whose ``.model`` sub-node describes the model.
        ckpt: checkpoint path; falsy means "fresh weights".
        gpu: forwarded to ``load_model_from_config``.
        eval_mode: forwarded to ``load_model_from_config``.

    Returns:
        Tuple of (model, global_step) — global_step is None without a ckpt.
    """
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="cpu" )
        global_step = pl_sd["global_step"]
        print(F'''loaded model from global step {global_step}.''' )
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=gpu , eval_mode=eval_mode )["model"]
    return model, global_step
| 50 |
'''Lazily-loaded public API for the Longformer model family.

Follows the standard ``transformers`` sub-package ``__init__`` pattern:
``_import_structure`` maps sub-modules to the names they export, optional
backends (tokenizers / torch / tf) are only registered when available, and
at runtime the module is replaced by a ``_LazyModule`` proxy so heavy
imports happen on first attribute access.
'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Config and the slow tokenizer have no optional dependencies.
_import_structure = {
    'configuration_longformer': [
        'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'LongformerConfig',
        'LongformerOnnxConfig',
    ],
    'tokenization_longformer': ['LongformerTokenizer'],
}

# Fast tokenizer requires the `tokenizers` backend.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_longformer_fast'] = ['LongformerTokenizerFast']

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_longformer'] = [
        'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LongformerForMaskedLM',
        'LongformerForMultipleChoice',
        'LongformerForQuestionAnswering',
        'LongformerForSequenceClassification',
        'LongformerForTokenClassification',
        'LongformerModel',
        'LongformerPreTrainedModel',
        'LongformerSelfAttention',
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_longformer'] = [
        'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFLongformerForMaskedLM',
        'TFLongformerForMultipleChoice',
        'TFLongformerForQuestionAnswering',
        'TFLongformerForSequenceClassification',
        'TFLongformerForTokenClassification',
        'TFLongformerModel',
        'TFLongformerPreTrainedModel',
        'TFLongformerSelfAttention',
    ]

if TYPE_CHECKING:
    # Static type checkers see real imports; at runtime these are lazy.
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Replace this module with a lazy proxy; attribute access triggers the
    # actual sub-module import.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 664 | 0 |
'''Tower of Hanoi: recursively print the sequence of moves.'''


def move_tower(height, from_pole, to_pole, with_pole):
    """Move ``height`` disks from ``from_pole`` to ``to_pole``.

    Uses ``with_pole`` as the auxiliary peg and prints one line per move
    (2**height - 1 moves in total).
    """
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    """Print a single disk move from pole ``fp`` to pole ``tp``."""
    print('moving disk from', fp, 'to', tp)


def main():
    """Read the tower height from stdin and solve the puzzle."""
    height = int(input('Height of hanoi: ').strip())
    move_tower(height, 'A', 'B', 'C')


if __name__ == "__main__":
    main()
| 120 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Map human-readable resampling names to PIL filters. Pillow >= 9.1.0 moved
# the filters into the ``PIL.Image.Resampling`` enum; older releases expose
# them as module-level constants, hence the version switch.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    __magic_name__ : dict ={
        # 'linear' is deliberately an alias of bilinear (PIL has no
        # separate LINEAR filter in the Resampling enum).
        'linear': PIL.Image.Resampling.BILINEAR,
        'bilinear': PIL.Image.Resampling.BILINEAR,
        'bicubic': PIL.Image.Resampling.BICUBIC,
        'lanczos': PIL.Image.Resampling.LANCZOS,
        'nearest': PIL.Image.Resampling.NEAREST,
    }
else:
    __magic_name__ : dict ={
        'linear': PIL.Image.LINEAR,
        'bilinear': PIL.Image.BILINEAR,
        'bicubic': PIL.Image.BICUBIC,
        'lanczos': PIL.Image.LANCZOS,
        'nearest': PIL.Image.NEAREST,
    }
def __snake_case ( images ):
    '''Convert a torch image batch in [-1, 1] to a list of PIL images.

    Denormalizes to [0, 1], moves channels last (NCHW -> NHWC), converts to
    numpy, then delegates to the numpy -> PIL helper.
    '''
    # NOTE(review): original code assigned each step to a throwaway name and
    # then read the unmodified input, discarding the clamp/permute results.
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    # NOTE(review): `numpy_to_pil` is the sibling helper below, which is
    # currently also (mis)named `__snake_case` in this file — confirm.
    images = numpy_to_pil(images )
    return images
def __snake_case ( images ):
    '''Convert a numpy image batch in [0, 1] to a list of PIL images.

    Accepts either a single HWC image (ndim == 3) or a batch NHWC array.
    '''
    if images.ndim == 3:
        # Promote a single image to a batch of one so the loop below works.
        images = images[None, ...]
    images = (images * 255).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
| 664 | 0 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
__UpperCAmelCase =logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __a ( A ) -> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __a ( A , A , A ) -> Any:
'''simple docstring'''
return max(metric_fn(lowerCamelCase_ , lowerCamelCase_ ) for gt in ground_truths )
def __a ( args , preds_path , gold_data_path ):
    '''Score predictions against gold answers with EM and F1 and log both.

    ``args.gold_data_mode == "qa"`` expects a TSV of question/answer-list
    rows; otherwise each gold line is a single answer string.
    '''
    hypos = [line.strip() for line in open(preds_path , "r" ).readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep="\t" , header=None )
        for answer_list in data[1]:
            # Answer lists are stored as Python-literal strings in column 1.
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , "r" ).readlines()]
        answers = [[reference] for reference in references]

    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        # NOTE(review): `metric_max_over_ground_truths` is the helper above,
        # currently (mis)named `__a` in this file — confirm resolution.
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )

    em = 1_00.0 * em / total
    fa = 1_00.0 * fa / total

    logger.info(f"""F1: {fa:.2f}""" )
    logger.info(f"""EM: {em:.2f}""" )
def __a ( args , preds_path , gold_data_path ):
    '''Compute precision@k between predicted and gold provenance strings.

    Each line in both files holds tab-separated document titles; only the
    first ``args.k`` predictions per line are considered.
    '''
    k = args.k
    hypos = [line.strip() for line in open(preds_path , "r" ).readlines()]
    references = [line.strip() for line in open(gold_data_path , "r" ).readlines()]

    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split("\t" )[:k] )
        ref_provenance = set(reference.split("\t" ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k

    em = 1_00.0 * em / total
    logger.info(f"""Precision@{k}: {em: .2f}""" )
def __a ( args , rag_model , questions ):
    '''Retrieve supporting documents for a batch of questions.

    Returns one tab-joined string of (de-quoted) document titles per
    question, suitable for precision@k scoring.
    '''
    def strip_title(title ):
        # Gold provenance titles are unquoted; drop surrounding quotes.
        if title.startswith("\"" ):
            title = title[1:]
        if title.endswith("\"" ):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors="pt" , padding=True , truncation=True , )["input_ids"].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance ) )
    return provenance_strings
def __a ( args , rag_model , questions ):
    '''Generate answers for a batch of questions with the RAG model.

    Returns the decoded answer strings, optionally logging Q/A pairs when
    ``args.print_predictions`` is set.
    '''
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors="pt" , padding=True , truncation=True )

        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )

        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info("Q: {} - A: {}".format(q , a ) )

        return answers
def __a ( ) -> Dict:
'''simple docstring'''
A__ = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=lowerCamelCase_ , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=lowerCamelCase_ , choices=["exact", "compressed", "legacy"] , type=lowerCamelCase_ , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=lowerCamelCase_ , type=lowerCamelCase_ , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=lowerCamelCase_ , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=lowerCamelCase_ , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=lowerCamelCase_ , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=lowerCamelCase_ , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=lowerCamelCase_ , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=lowerCamelCase_ , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=lowerCamelCase_ , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=lowerCamelCase_ , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=lowerCamelCase_ , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
A__ = parser.parse_args()
A__ = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def __a ( args ):
    '''Run RAG/BART evaluation for every requested checkpoint.

    For each checkpoint: either re-score an existing predictions file, or
    load the model, generate/retrieve for the evaluation set in batches,
    write predictions, and score them.
    '''
    model_kwargs = {}
    if args.model_type is None:
        # NOTE(review): the helper functions referenced below are defined in
        # this file but currently all share the mangled name `__a` — confirm
        # name resolution before relying on this module.
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None

    if args.model_type.startswith("rag" ):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s" , checkpoints )

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            # Predictions already exist: just (re-)score them.
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint ) )
        logger.info("  Batch size = %d" , args.eval_batch_size )
        logger.info("  Predictions will be stored under {}".format(args.predictions_path ) )

        if args.model_type.startswith("rag" ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )

        with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write("\n".join(answers ) + "\n" )
                    preds_file.flush()
                    questions = []
            # Flush the final, possibly partial, batch.
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write("\n".join(answers ) )
                preds_file.flush()

            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    # NOTE(review): `get_args` / `main` are defined above but currently share
    # the mangled name `__a` — confirm name resolution.
    args = get_args()
    main(args)
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__magic_name__ : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    ''' , )
class UpperCamelCase_ ( Pipeline ):
    """Masked-language-modeling (fill-mask) pipeline.

    Predicts the most likely replacements for the tokenizer's mask token in
    the input text, optionally restricted to a set of target words.

    NOTE(review): the original block's base class / decorator argument were
    mangled to the undefined name ``A`` and every method was named ``__A``,
    breaking the ``Pipeline`` contract (``preprocess`` / ``_forward`` /
    ``postprocess`` / ``_sanitize_parameters``); canonical names restored.
    """

    def get_masked_index(self , input_ids: GenericTensor ) -> np.ndarray:
        """Return the positions of the mask token in ``input_ids``."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False )
        else:
            raise ValueError("Unsupported framework" )
        return masked_index

    def _ensure_exactly_one_mask_token(self , input_ids: GenericTensor ):
        """Raise a PipelineException when ``input_ids`` contains no mask token."""
        masked_index = self.get_masked_index(input_ids )
        numel = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , )

    def ensure_exactly_one_mask_token(self , model_inputs ):
        """Validate every sequence in ``model_inputs`` carries a mask token."""
        if isinstance(model_inputs , list ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids )

    def preprocess(self , inputs , return_tensors=None , **preprocess_parameters ) -> Dict[str, GenericTensor]:
        """Tokenize ``inputs`` and check that a mask token is present."""
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors )
        self.ensure_exactly_one_mask_token(model_inputs )
        return model_inputs

    def _forward(self , model_inputs ):
        """Run the model; keep input_ids alongside outputs for postprocessing."""
        model_outputs = self.model(**model_inputs )
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self , model_outputs , top_k=5 , target_ids=None ):
        """Turn logits into the top-k candidate fillings per mask position."""
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits , axis=-1 )
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs , 0 ) , target_ids.reshape(-1 , 1 ) )
                probs = tf.expand_dims(probs , 0 )
            topk = tf.math.top_k(probs , k=top_k )
            values , predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1 )
            if target_ids is not None:
                probs = probs[..., target_ids]
            values , predictions = probs.topk(top_k )

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            row = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens , skip_special_tokens=single_mask )
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
                row.append(proposition )
            result.append(row )
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self , targets , top_k=None ):
        """Map target words to vocabulary ids, tokenizing out-of-vocab words."""
        if isinstance(targets , str ):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None )
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )["input_ids"]
                if len(input_ids ) == 0:
                    logger.warning(
                        f'The specified target token `{target}` does not exist in the model vocabulary. '
                        "We cannot replace it with anything meaningful, ignoring it" )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f'The specified target token `{target}` does not exist in the model vocabulary. '
                    f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
            target_ids.append(id_ )
        target_ids = list(set(target_ids ) )
        if len(target_ids ) == 0:
            raise ValueError("At least one target must be provided when passed." )
        target_ids = np.array(target_ids )
        return target_ids

    def _sanitize_parameters(self , top_k=None , targets=None ):
        """Split call kwargs into preprocess/forward/postprocess parameter dicts."""
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
        return {}, {}, postprocess_params

    def __call__(self , inputs , *args , **kwargs ):
        """Fill the mask(s) in ``inputs``; unwrap single-element list inputs."""
        outputs = super().__call__(inputs , **kwargs )
        if isinstance(inputs , list ) and len(inputs ) == 1:
            return outputs[0]
        return outputs
| 664 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class UpperCamelCase__ (unittest.TestCase ):
    '''PyTorchBenchmark smoke tests on tiny models.

    NOTE(review): this class appears name-mangled. Every method is named
    ``_lowercase``, so later definitions shadow earlier ones and only the
    final method survives on the class. Locals are assigned to
    ``lowerCamelCase`` while subsequent statements read the intended names
    (``results``, ``benchmark``, ``config`` …), which raises NameError at
    runtime. Boolean flags passed as ``_lowerCamelCase`` are likewise
    undefined. Restore the canonical test names and locals before use.
    '''
    # Shared helper: every benchmark result bucket must be populated.
    def _lowercase ( self , UpperCamelCase__ ) -> Any:
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
                lowerCamelCase : Any = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(_lowerCamelCase )
    # Inference-only benchmark, no explicit config.
    def _lowercase ( self ) -> Dict:
        lowerCamelCase : Optional[int] = "sshleifer/tiny-gpt2"
        lowerCamelCase : List[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
        lowerCamelCase : Union[str, Any] = PyTorchBenchmark(_lowerCamelCase )
        lowerCamelCase : List[Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    # Inference with only_pretrain_model on a classification checkpoint.
    def _lowercase ( self ) -> Union[str, Any]:
        lowerCamelCase : List[str] = "sgugger/tiny-distilbert-classification"
        lowerCamelCase : int = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , only_pretrain_model=_lowerCamelCase , )
        lowerCamelCase : str = PyTorchBenchmark(_lowerCamelCase )
        lowerCamelCase : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    # TorchScript-traced inference.
    def _lowercase ( self ) -> Dict:
        lowerCamelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
        lowerCamelCase : Optional[int] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , torchscript=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
        lowerCamelCase : str = PyTorchBenchmark(_lowerCamelCase )
        lowerCamelCase : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    # Half-precision inference (GPU only); `fpaa` is mangled `fp16`.
    @unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
    def _lowercase ( self ) -> Any:
        lowerCamelCase : List[Any] = "sshleifer/tiny-gpt2"
        lowerCamelCase : Tuple = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , fpaa=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
        lowerCamelCase : str = PyTorchBenchmark(_lowerCamelCase )
        lowerCamelCase : List[Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    # Config with `architectures = None` must still benchmark.
    def _lowercase ( self ) -> Optional[Any]:
        lowerCamelCase : str = "sshleifer/tiny-gpt2"
        lowerCamelCase : List[Any] = AutoConfig.from_pretrained(_lowerCamelCase )
        # set architectures equal to `None`
        lowerCamelCase : List[Any] = None
        lowerCamelCase : Dict = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
        lowerCamelCase : str = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
        lowerCamelCase : Union[str, Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    # Training-only benchmark, no explicit config.
    def _lowercase ( self ) -> Any:
        lowerCamelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
        lowerCamelCase : Optional[int] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
        lowerCamelCase : Optional[Any] = PyTorchBenchmark(_lowerCamelCase )
        lowerCamelCase : int = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    # Half-precision training (GPU only).
    @unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
    def _lowercase ( self ) -> int:
        lowerCamelCase : Optional[Any] = "sshleifer/tiny-gpt2"
        lowerCamelCase : List[str] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowerCamelCase , multi_process=_lowerCamelCase , )
        lowerCamelCase : Union[str, Any] = PyTorchBenchmark(_lowerCamelCase )
        lowerCamelCase : Dict = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    # Inference with an explicitly provided config.
    def _lowercase ( self ) -> Dict:
        lowerCamelCase : Dict = "sshleifer/tiny-gpt2"
        lowerCamelCase : int = AutoConfig.from_pretrained(_lowerCamelCase )
        lowerCamelCase : Dict = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
        lowerCamelCase : Any = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
        lowerCamelCase : List[Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    # Encoder-decoder (BART) inference with explicit config.
    def _lowercase ( self ) -> List[Any]:
        lowerCamelCase : Union[str, Any] = "sshleifer/tinier_bart"
        lowerCamelCase : List[str] = AutoConfig.from_pretrained(_lowerCamelCase )
        lowerCamelCase : str = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
        lowerCamelCase : Optional[Any] = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
        lowerCamelCase : Any = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    # Training with an explicitly provided config.
    def _lowercase ( self ) -> Optional[Any]:
        lowerCamelCase : int = "sshleifer/tiny-gpt2"
        lowerCamelCase : int = AutoConfig.from_pretrained(_lowerCamelCase )
        lowerCamelCase : int = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
        lowerCamelCase : Dict = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
        lowerCamelCase : List[str] = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    # Encoder-decoder (BART) training with explicit config.
    def _lowercase ( self ) -> List[str]:
        lowerCamelCase : Union[str, Any] = "sshleifer/tinier_bart"
        lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase )
        lowerCamelCase : Dict = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
        lowerCamelCase : List[Any] = PyTorchBenchmark(_lowerCamelCase , configs=[config] )
        lowerCamelCase : str = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    # CSV export: every requested result file must be created.
    def _lowercase ( self ) -> int:
        lowerCamelCase : Dict = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase : List[str] = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , save_to_csv=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowerCamelCase , "inf_time.csv" ) , train_memory_csv_file=os.path.join(_lowerCamelCase , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(_lowerCamelCase , "inf_mem.csv" ) , train_time_csv_file=os.path.join(_lowerCamelCase , "train_time.csv" ) , env_info_csv_file=os.path.join(_lowerCamelCase , "env.csv" ) , multi_process=_lowerCamelCase , )
            lowerCamelCase : Any = PyTorchBenchmark(_lowerCamelCase )
            benchmark.run()
            self.assertTrue(Path(os.path.join(_lowerCamelCase , "inf_time.csv" ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCamelCase , "train_time.csv" ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCamelCase , "inf_mem.csv" ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCamelCase , "train_mem.csv" ) ).exists() )
            self.assertTrue(Path(os.path.join(_lowerCamelCase , "env.csv" ) ).exists() )
    # Line-by-line memory tracing plus log file creation.
    def _lowercase ( self ) -> Optional[int]:
        lowerCamelCase : List[str] = "sshleifer/tiny-gpt2"
        def _check_summary_is_not_empty(UpperCamelCase__ ):
            self.assertTrue(hasattr(_lowerCamelCase , "sequential" ) )
            self.assertTrue(hasattr(_lowerCamelCase , "cumulative" ) )
            self.assertTrue(hasattr(_lowerCamelCase , "current" ) )
            self.assertTrue(hasattr(_lowerCamelCase , "total" ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase : int = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowerCamelCase , "log.txt" ) , log_print=_lowerCamelCase , trace_memory_line_by_line=_lowerCamelCase , multi_process=_lowerCamelCase , )
            lowerCamelCase : Any = PyTorchBenchmark(_lowerCamelCase )
            lowerCamelCase : str = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(_lowerCamelCase , "log.txt" ) ).exists() )
| 311 |
'''simple docstring'''
from __future__ import annotations
def __snake_case ( lowerCamelCase_ : list[int] , lowerCamelCase_ : int ):
'''simple docstring'''
if len(lowerCamelCase_ ) < k or k < 0:
raise ValueError("Invalid Input" )
__magic_name__ = __magic_name__ = sum(array[:k] )
for i in range(len(lowerCamelCase_ ) - k ):
__magic_name__ = current_sum - array[i] + array[i + k]
__magic_name__ = max(lowerCamelCase_ , lowerCamelCase_ )
return max_sum
if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    # Demo: random array of 100 ints in [-1000, 1000] and a random window k.
    array = [randint(-10_00, 10_00) for i in range(1_00)]
    k = randint(0, 1_10)
    # NOTE(review): the sliding-window function above is currently
    # (mis)named `__snake_case`; `max_sum_in_array` is its intended name.
    print(F'''The maximum sum of {k} consecutive elements is {__snake_case(array,k)}''')
| 664 | 0 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module-level logger; the rest of this script reads it under the name `logger`.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav, max_length, sample_rate=16000):
    """Randomly sample a chunk of `max_length` seconds from the input audio.

    Args:
        wav: 1-D audio sequence (anything sliceable with a length).
        max_length: maximum clip length in seconds.
        sample_rate: samples per second used to convert seconds to samples.

    Returns:
        `wav` unchanged when it is no longer than the target length, otherwise
        a random contiguous slice of exactly `sample_rate * max_length` samples.
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    # Pick a random start so the slice fits entirely inside the clip.
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    NOTE(review): field names in this file were machine-mangled (all bound to
    `_A`, no annotations, so none were real dataclass fields); they are
    restored here from the attribute names read in `main()`.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/feature-extractor we are going to fine-tune from.

    NOTE(review): field names restored from the attribute names read in
    `main()`; the defaults for the boolean flags were mangled and are
    assumed from the help texts — TODO confirm.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        """Translate the deprecated `freeze_feature_extractor` flag, rejecting contradictory settings."""
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    """Fine-tune an audio-classification model with the HF Trainer.

    Parses model/data/training arguments, loads and preprocesses the dataset,
    builds the model, then runs training and/or evaluation and writes a model
    card (or pushes to the Hub).

    NOTE(review): local names in this file were machine-mangled (every binding
    collapsed to `lowerCamelCase`); they are restored here from the names they
    are read under later in the function. The module guard calls `main()`, so
    that name is restored here as well.
    """
    # See all possible arguments by passing --help to the script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Randomly subsample and featurize a training batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Featurize a validation batch (no subsampling)."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Compute accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main() | 623 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
logger = logging.get_logger(__name__)

# Mapping of pretrained checkpoints to config URLs (none registered here).
# NOTE(review): both module constants were mangled to one name; restored names
# follow the surrounding conventions — confirm against the original module.
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class UpperCamelCase_(PretrainedConfig):
    """Configuration for LLaMA models.

    Stores the hyper-parameters of a LLaMA architecture; instantiating with the
    defaults yields a LLaMA-7B-like configuration.

    NOTE(review): parameter and class-attribute names in this file were
    machine-mangled (duplicate `_lowerCamelCase` parameters are a SyntaxError);
    they are restored here from the names read in the method bodies. The base
    class is restored to the `PretrainedConfig` imported at the top of the file.
    """

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dict: exactly {"type": "linear"|"dynamic", "factor": float > 1}."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 664 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
__lowerCamelCase : int = logging.get_logger(__name__)
# Constant names restored from the attribute references in the tokenizer
# classes below (all twelve constants had been mangled to one name, so only
# the last binding survived and every class reference was a NameError).
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPR context-encoder tokenizer (backed by HuggingFace's *tokenizers* library).

    Identical to `BertTokenizerFast`: end-to-end tokenization with punctuation
    splitting and wordpiece. Class and attribute names were restored from the
    slow-tokenizer counterpart and the constants referenced on the right-hand
    sides (the mangled originals shadowed each other).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPR question-encoder tokenizer (backed by HuggingFace's *tokenizers* library).

    Identical to `BertTokenizerFast`: end-to-end tokenization with punctuation
    splitting and wordpiece. Class and attribute names were restored from the
    slow-tokenizer counterpart and the constants referenced on the right-hand
    sides (the mangled originals shadowed each other).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
# One candidate answer span produced by the reader post-processing.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

# Raw reader model output: per-token start/end logits and per-passage relevance logits.
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__lowerCamelCase : Union[str, Any] = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. 
Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(UpperCamelCase_)  # NOTE(review): argument looks mangled; presumably the reader docstring constant
class SCREAMING_SNAKE_CASE__:
    """Mixin adding DPR-reader-specific passage encoding and answer-span decoding
    on top of a BERT fast tokenizer.

    NOTE(review): parameter/local names in this file were machine-mangled
    (duplicate `__A` parameters are a SyntaxError, and both decode methods
    shared one name while the body calls `self._get_best_spans`); names are
    restored here from the names they are read under in the bodies.
    """

    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchEncoding:
        """Encode question/title/text triples as `[CLS] question [SEP] title [SEP] text` passages."""
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            # Only one of titles/texts given: encode it as a regular text pair.
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # A single question may be paired with many passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # Attend to every non-padding token.
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ):
        """Return up to `num_spans` best answer spans, scanning passages by decreasing relevance."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                # Span indices are relative to the passage; shift them back.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans):
        """Return the `top_spans` best non-overlapping (start, end) spans ranked by start+end score."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            # Skip spans that overlap an already-chosen span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(UpperCamelCase_)  # NOTE(review): argument looks mangled; presumably the reader docstring constant
class DPRReaderTokenizerFast(SCREAMING_SNAKE_CASE__, BertTokenizerFast):
    r"""
    Construct a "fast" DPR reader tokenizer: BERT wordpiece tokenization plus
    DPR-specific passage encoding and answer-span decoding from the mixin
    (`SCREAMING_SNAKE_CASE__` is the mangled mixin class defined above).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 297 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure of an ideal gas: P = nRT / V.

    NOTE(review): the constant and parameter names were machine-mangled
    (duplicate parameters are a SyntaxError, and the constant was never bound
    under the name the bodies read); restored from the use sites.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume of an ideal gas: V = nRT / P.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 664 | 0 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
# Module constants — names restored from the use sites below (all three were
# mangled to `a_`, so only the last binding survived).
logger = logging.getLogger(__name__)

# Sample input used to check that the converted model matches the original.
SAMPLE_TEXT = "Hello world! cécé herlolip"

# Hyper-parameter container mirroring the original BertAbs configuration.
BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Convert the authors' BertAbs checkpoint into a `transformers`-style state dict.

    path_to_checkpoints: path to the official PyTorch dump.
    dump_path: output folder (currently unused — the save path below is fixed).

    The original definition had two parameters both named ``lowerCamelCase``
    (a SyntaxError), was named ``__lowercase`` while the __main__ block calls
    ``convert_bertabs_checkpoints``, and every local was a write-only
    ``UpperCamelCase_`` binding; local names are restored from their read sites.
    """
    # Instantiate the authors' model with the pre-trained weights.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(checkpoints, torch.device("cpu"), config)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs (pad both sides to the 512-token max position)
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    # CLI entry point: both add_argument/parse_args results were bound to
    # `a_` while the code below reads `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--bertabs_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch dump.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the output PyTorch model.',
    )
    args = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 417 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
# Bound to `logger` because write_predictions_to_file below reads `logger.warning`.
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    """CoNLL-style named-entity-recognition task (one token + labels per line).

    Restored from the obfuscated version, which had duplicate parameter names
    (a SyntaxError), three methods all sharing the mangled name ``__A``, and an
    ``__init__`` that never actually set ``self.label_idx`` (read at
    ``splits[self.label_idx]`` below and by the ``label_idx=-2`` subclass call).
    """

    def __init__(self, label_idx: int = -1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir: str, mode: Union[Split, str]) -> List[InputExample]:
        """Parse `{mode}.txt` in `data_dir` into InputExamples, one per sentence."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                # Blank lines / document markers terminate the current sentence.
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Echo the test file to `writer`, appending one prediction per token."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        """Read labels from `path` (one per line), or fall back to the CoNLL-2003 set."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            # "O" (outside) must always be present.
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    """CoNLL-2000/2003 syntactic chunking task; labels live in the
    second-to-last column instead of the last one."""

    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        """Read chunk labels from `path`, or fall back to the default chunk tag set."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    """Part-of-speech tagging task over CoNLL-U files (parsed with `conllu.parse_incr`)."""

    def read_examples_from_file(self, data_dir: str, mode: Union[Split, str]) -> List[InputExample]:
        """Parse `{mode}.txt` in `data_dir` into one InputExample per CoNLL-U sentence."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Write `form (upos|prediction)` pairs per sentence to `writer`."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        """Read UPOS labels from `path`, or fall back to the universal tag set."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 664 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node holding a value and optional left/right children.

    The obfuscated version discarded all three assignments into locals, so
    instances never got ``value``/``left``/``right`` attributes; it was also
    named ``a_``, colliding with the traversal class below.
    """

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    """Iterates to the sum of all node values in a binary tree via DFS.

    Restored: ``__init__`` never set ``self.tree`` (read by ``__iter__``),
    and the recursive method was named ``__UpperCamelCase`` while both call
    sites read ``self.depth_first_search``.
    """

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Return the sum of values in the subtree rooted at `node` (0 for None)."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        # Yield the whole-tree sum as a single item.
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
    import doctest

    # Run any doctests embedded in this module when executed as a script.
    doctest.testmod()
| 384 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    """A small dense matrix with the operations needed by the Sherman–Morrison
    formula: indexing, +, unary -, -, * (scalar and matrix), transpose.

    Restored from the obfuscated version: ``__init__`` had duplicate parameter
    names (a SyntaxError) and never set any attribute; ``validate_indicies``,
    ``transpose`` and ``sherman_morrison`` were all defined under the same
    mangled name ``__A`` although the class and the __main__ demo call them by
    these names; the class itself is called ``Matrix`` by the demo below.
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a `row` x `column` matrix filled with `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f'Matrix consist of {self.row} rows and {self.column} columns\n'
        # Make string identifier: pad every element to the widest one.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'%{max_element_length}s'

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Return True iff `loc` is an in-range (row, column) pair."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add element-wise.
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'Unsupported type given for another ({type(another )})'
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """Given self = A^(-1), return (A + uv^T)^(-1) via the Sherman–Morrison
        formula, or None when the update denominator is zero (not invertible)."""
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demo Sherman–Morrison with a = I (so a^(-1) = I) and small u, v.

        Restored: the obfuscated version bound every value to
        ``__magic_name__`` while printing ``ainv``/``u``/``v`` and calling the
        undefined ``testa()``.
        """
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'a^(-1) is {ainv}')
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'u is {u}')
        print(f'v is {v}')
        print(f'uv^T is {u * v.transpose()}')
        # Sherman Morrison
        print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v )}')

    def test2() -> None:
        """Run the module doctests."""
        import doctest

        doctest.testmod()

    test1()
    test2()
| 664 | 0 |
"""Package init for the CLIPSeg model: declares the lazy-import structure.

Restored: the import dict and the torch-only submodule list were both bound
to ``a`` (the list clobbering the dict), while the final ``_LazyModule`` call
reads ``_import_structure``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Torch-independent submodules, always importable.
_import_structure = {
    'configuration_clipseg': [
        'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'CLIPSegConfig',
        'CLIPSegTextConfig',
        'CLIPSegVisionConfig',
    ],
    'processing_clipseg': ['CLIPSegProcessor'],
}

# Modeling code is only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_clipseg'] = [
        'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CLIPSegModel',
        'CLIPSegPreTrainedModel',
        'CLIPSegTextModel',
        'CLIPSegVisionModel',
        'CLIPSegForImageSegmentation',
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only.
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 169 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Sample sentence used in the original conversion sanity checks.
SAMPLE_TEXT = 'Hello world! cécé herlolip'

# Namespace shim mirroring the authors' training configuration options;
# the converter below instantiates `BertAbsConfig`, so it must carry this
# name (in the original all three constants were rebound to `__magic_name__`).
BertAbsConfig = namedtuple(
    'BertAbsConfig',
    [
        'temp_dir',
        'large',
        'use_bert_emb',
        'finetune_bert',
        'encoder',
        'share_emb',
        'max_pos',
        'enc_layers',
        'enc_hidden_size',
        'enc_heads',
        'enc_ff_size',
        'enc_dropout',
        'dec_layers',
        'dec_hidden_size',
        'dec_heads',
        'dec_ff_size',
        'dec_dropout',
    ],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Convert the authors' BertAbs checkpoint into a `transformers`-style state dict.

    path_to_checkpoints: path to the official PyTorch dump.
    dump_path: output folder (currently unused — the save path below is fixed).

    The original definition had two parameters both named ``lowerCamelCase_``
    (a SyntaxError), was named ``__snake_case`` while the __main__ block calls
    ``convert_bertabs_checkpoints``, and every local was a write-only
    ``__magic_name__`` binding; local names are restored from their read sites.
    """
    # Instantiate the authors' model with the pre-trained weights.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(checkpoints, torch.device("cpu"), config)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs (pad both sides to the 512-token max position)
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    # CLI entry point: parser/args were bound to `__magic_name__` while the
    # code below reads `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--bertabs_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch dump.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the output PyTorch model.',
    )
    args = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 664 | 0 |
# Forward table: character -> Morse sequence. Must be named MORSE_CODE_DICT
# because encrypt() and the reverse table below read it by that name.
MORSE_CODE_DICT = {
    'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
    'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
    'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
    'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
    '2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
    '8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
    ':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
    '?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
    '(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

# Inverse table: Morse sequence -> character.
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Encode `message` (case-insensitive) as space-separated Morse sequences."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Decode space-separated Morse sequences back to plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    """Round-trip a sample message through encrypt/decrypt, printing each stage."""
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
| 461 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase_(unittest.TestCase):
    """Unit tests for `transformers.generation.DisjunctiveConstraint`.

    Restored: all four tests were defined under the same mangled name ``__A``
    (so only one survived and none was discovered by unittest), and every
    constraint/result was bound to ``__magic_name__`` while the assertions
    read ``_lowerCamelCase``.
    """

    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 664 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, tiny-model) tests for CycleDiffusionPipeline.

    Restored: the base classes and all class attributes read by
    PipelineTesterMixin were bound to obfuscated names; `get_dummy_inputs`
    had two parameters both named ``lowercase`` (a SyntaxError); and every
    method shared one name so only the last survived.
    """

    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        '''negative_prompt''',
        '''height''',
        '''width''',
        '''negative_prompt_embeds''',
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a dict of tiny deterministic pipeline components."""
        torch.manual_seed(0)
        # NOTE: the file imports this class as `UNetaDConditionModel`; kept consistent.
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''),
            cross_attention_dim=32,
        )
        # clip_sample/set_alpha_to_one must be disabled for CycleDiffusion's DDIM inversion.
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='''scaled_linear''',
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic call kwargs for the pipeline on `device`."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''An astronaut riding an elephant''',
            '''source_prompt''': '''An astronaut riding a horse''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''eta''': 0.1,
            '''strength''': 0.8,
            '''guidance_scale''': 3,
            '''source_guidance_scale''': 1,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        """End-to-end tiny run on CPU against a pinned output slice."""
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != '''cuda''', '''This test requires a GPU''')
    def test_stable_diffusion_cycle_fp16(self):
        """Same run in fp16 on GPU against a pinned output slice."""
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, '''half'''):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip('''non-deterministic pipeline''')
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
    def snake_case__ ( self :str ) -> Optional[int]:
        """Free GPU memory between slow tests.

        NOTE(review): unittest only calls this automatically when it is named
        ``tearDown``; the ``super().tearDown()`` call suggests the method was
        mechanically renamed — confirm and restore the original name.
        """
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def snake_case__ ( self :List[str] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
SCREAMING_SNAKE_CASE = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
SCREAMING_SNAKE_CASE = init_image.resize((5_1_2, 5_1_2) )
SCREAMING_SNAKE_CASE = '''CompVis/stable-diffusion-v1-4'''
SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(_lowerCamelCase , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE = CycleDiffusionPipeline.from_pretrained(
_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = '''A black colored car'''
SCREAMING_SNAKE_CASE = '''A blue colored car'''
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=_lowerCamelCase , source_prompt=_lowerCamelCase , image=_lowerCamelCase , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCamelCase , output_type='''np''' , )
SCREAMING_SNAKE_CASE = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def snake_case__ ( self :List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
SCREAMING_SNAKE_CASE = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
SCREAMING_SNAKE_CASE = init_image.resize((5_1_2, 5_1_2) )
SCREAMING_SNAKE_CASE = '''CompVis/stable-diffusion-v1-4'''
SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(_lowerCamelCase , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE = CycleDiffusionPipeline.from_pretrained(_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = '''A black colored car'''
SCREAMING_SNAKE_CASE = '''A blue colored car'''
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=_lowerCamelCase , source_prompt=_lowerCamelCase , image=_lowerCamelCase , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowerCamelCase , output_type='''np''' , )
SCREAMING_SNAKE_CASE = output.images
assert np.abs(image - expected_image ).max() < 2e-2 | 201 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
# Non-default values for (nearly) every common `PretrainedConfig` kwarg.
# The config tests below use this mapping to detect which attributes of a
# freshly constructed config still sit at their library defaults.
# NOTE(review): the dict's name was mangled to `__magic_name__`; the test
# class later reads it as `config_common_kwargs`, so this module raises
# NameError as written.
__magic_name__ : Dict ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_28,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
"""Staging-hub integration tests: push BertConfig / CustomConfig to the Hub
(directly and via ``save_pretrained(push_to_hub=...)``) and verify a
round-trip ``from_pretrained`` restores every attribute.

NOTE(review): both classmethods are named ``__A`` (only the second
definition survives in Python) — they were originally ``setUpClass`` /
``tearDownClass``; likewise ``__magic_name__`` assignments shadow what
were distinct variables (``cls._token``, ``config``, ``new_config``), so
this class fails at runtime as written.
"""
@classmethod
def __A ( cls : Any ) -> Union[str, Any]:
# Originally setUpClass: store the staging token for all tests.
__magic_name__ = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def __A ( cls : Any ) -> Tuple:
# Originally tearDownClass: best-effort cleanup of repos created by tests;
# a missing repo raises HTTPError, which is deliberately swallowed.
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def __A ( self : Optional[Any] ) -> Dict:
# Push a small BertConfig under the user namespace and round-trip it.
__magic_name__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained(f'{USER}/test-config' )
# transformers_version differs per environment, so it is excluded.
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowerCamelCase , repo_id="test-config" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def __A ( self : str ) -> Optional[int]:
# Same round-trip, but pushing into an organization namespace.
__magic_name__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_lowerCamelCase , repo_id="valid_org/test-config-org" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def __A ( self : Optional[int] ) -> Union[str, Any]:
# Dynamic (trust_remote_code) config: register, push, and reload by name.
CustomConfig.register_for_auto_class()
__magic_name__ = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
__magic_name__ = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class UpperCamelCase_ ( unittest.TestCase ):
"""Offline config-utils tests: ``update_from_string``, default-kwarg
coverage, subfolder loading, offline/500 fallback to cache, legacy URL
loading, and versioned configuration-file selection.

NOTE(review): every test method is named ``__A`` (only the last definition
survives in Python) and ``__magic_name__`` shadows distinct locals
(``c``, ``n_embd``, ``configuration``, mock response attributes, ...), so
this class fails at runtime as written — the identifiers were mangled.
"""
def __A ( self : Optional[int] ) -> Optional[Any]:
__magic_name__ = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__magic_name__ = c.n_embd + 1 # int
__magic_name__ = c.resid_pdrop + 1.0 # float
__magic_name__ = not c.scale_attn_weights # bool
__magic_name__ = c.summary_type + "foo" # str
c.update_from_string(
f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
self.assertEqual(_lowerCamelCase , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(_lowerCamelCase , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(_lowerCamelCase , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(_lowerCamelCase , c.summary_type , "mismatch for key: summary_type" )
def __A ( self : List[Any] ) -> Union[str, Any]:
# Guard: every PretrainedConfig kwarg must have a non-default entry in
# config_common_kwargs, except the four whitelisted internal keys.
__magic_name__ = PretrainedConfig()
__magic_name__ = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
_lowerCamelCase , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
__magic_name__ = [key for key, value in config_common_kwargs.items() if value == getattr(_lowerCamelCase , _lowerCamelCase )]
if len(_lowerCamelCase ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f' {", ".join(_lowerCamelCase )}.' )
def __A ( self : List[Any] ) -> List[Any]:
with self.assertRaises(_lowerCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(_lowerCamelCase )
def __A ( self : Tuple ) -> int:
# A mock response for an HTTP head request to emulate server down
__magic_name__ = mock.Mock()
__magic_name__ = 5_00
__magic_name__ = {}
__magic_name__ = HTTPError
__magic_name__ = {}
# Download this model to make sure it's in the cache.
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=_lowerCamelCase ) as mock_head:
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
def __A ( self : Union[str, Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
__magic_name__ = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def __A ( self : Dict ) -> Optional[int]:
# Versioned config files: a config.4.0.0.json must be preferred over
# config.json when the installed transformers version is >= 4.0.0, and a
# config.42.0.0.json must be ignored while the version is < 4.42.0.
__magic_name__ = AutoConfig.from_pretrained("bert-base-cased" )
__magic_name__ = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_lowerCamelCase )
__magic_name__ = 2
json.dump(configuration.to_dict() , open(os.path.join(_lowerCamelCase , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__magic_name__ = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__magic_name__ = ["config.42.0.0.json"]
__magic_name__ = 7_68
configuration.save_pretrained(_lowerCamelCase )
shutil.move(os.path.join(_lowerCamelCase , "config.4.0.0.json" ) , os.path.join(_lowerCamelCase , "config.42.0.0.json" ) )
__magic_name__ = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def __A ( self : Optional[int] ) -> str:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__magic_name__ = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
__magic_name__ = "v4.0.0"
__magic_name__ , __magic_name__ = new_transformers.models.auto.AutoConfig.from_pretrained(
_lowerCamelCase , return_unused_kwargs=_lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(_lowerCamelCase , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__magic_name__ = "v3.0.0"
__magic_name__ = old_transformers.models.auto.AutoConfig.from_pretrained(_lowerCamelCase )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 664 | 0 |
'''simple docstring'''
class UpperCamelCase__ :
'''Undirected weighted graph backed by a nested adjacency dict
(``adjacency[tail][head] = weight``), with helpers to add vertices/edges,
make all edge weights distinct, enumerate edges/vertices, and build a
graph from vertex/edge lists.

NOTE(review): heavily mangled. All five instance methods share the name
``UpperCamelCase_`` (only the last definition survives in Python); local
assignments to ``lowerCamelCase__`` shadow what were originally attribute
assignments (e.g. ``self.adjacency = {}``); and the static builder calls
``Graph()``, a name not defined in this block. It fails at runtime as
written.
'''
def __init__( self ):
# Originally: self.num_vertices = 0; self.num_edges = 0; self.adjacency = {}
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = {}
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
# add_vertex: register a vertex with an empty neighbor map if unseen.
if vertex not in self.adjacency:
lowerCamelCase__ = {}
self.num_vertices += 1
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
# add_edge: insert an undirected weighted edge; self-loops are ignored.
self.add_vertex(_lowerCamelCase )
self.add_vertex(_lowerCamelCase )
if head == tail:
return
lowerCamelCase__ = weight
lowerCamelCase__ = weight
def UpperCamelCase_ ( self ):
# distinct_weight: deduplicate the (head, tail)/(tail, head) edge pairs,
# sort by weight, then bump equal weights so every edge weight is unique.
lowerCamelCase__ = self.get_edges()
for edge in edges:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = edge
edges.remove((tail, head, weight) )
for i in range(len(_lowerCamelCase ) ):
lowerCamelCase__ = list(edges[i] )
edges.sort(key=lambda _lowerCAmelCase : e[2] )
for i in range(len(_lowerCamelCase ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowerCamelCase__ = edges[i][2] + 1
for edge in edges:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = edge
lowerCamelCase__ = weight
lowerCamelCase__ = weight
def __str__( self ):
# Render each stored edge as "head -> tail == weight", one per line.
lowerCamelCase__ = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowerCamelCase__ = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("""\n""" )
def UpperCamelCase_ ( self ):
# get_edges: flatten the adjacency dict into (tail, head, weight) tuples.
lowerCamelCase__ = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def UpperCamelCase_ ( self ):
# get_vertices: a view of all vertex keys.
return self.adjacency.keys()
@staticmethod
def UpperCamelCase_ ( _lowerCAmelCase=None ,_lowerCAmelCase=None ):
# build: construct a graph from optional vertex and (head, tail, weight)
# edge lists. NOTE(review): calls Graph(), undefined in this block.
lowerCamelCase__ = Graph()
if vertices is None:
lowerCamelCase__ = []
if edges is None:
lowerCamelCase__ = []
for vertex in vertices:
g.add_vertex(_lowerCamelCase )
for edge in edges:
g.add_edge(*_lowerCamelCase )
return g
class UpperCamelCase__ :
'''Union-Find (disjoint set with path compression and union by rank) plus
a static Borůvka-style minimum-spanning-tree builder.

NOTE(review): mangled like the class above — ``make_set``/``find``/``union``
and the static MST builder all share the name ``UpperCamelCase_``; both
find results in ``union`` are bound to the same local ``roota`` so
``roota == roota`` is always true and union returns immediately; and the
MST builder references ``Graph.UnionFind()`` / ``Graph.build``, names not
defined here. It fails at runtime as written.
'''
def __init__( self ):
# Originally: self.parent = {}; self.rank = {}
lowerCamelCase__ = {}
lowerCamelCase__ = {}
def __len__( self ):
return len(self.parent )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
# make_set: create a singleton set for item (idempotent).
if item in self.parent:
return self.find(_lowerCamelCase )
lowerCamelCase__ = item
lowerCamelCase__ = 0
return item
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
# find: root lookup with path compression.
if item not in self.parent:
return self.make_set(_lowerCamelCase )
if item != self.parent[item]:
lowerCamelCase__ = self.find(self.parent[item] )
return self.parent[item]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
# union by rank; returns the surviving root, or the common root when the
# two items are already in the same set.
lowerCamelCase__ = self.find(_lowerCamelCase )
lowerCamelCase__ = self.find(_lowerCamelCase )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowerCamelCase__ = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowerCamelCase__ = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowerCamelCase__ = roota
return roota
return None
@staticmethod
def UpperCamelCase_ ( _lowerCAmelCase ):
# Borůvka MST: repeatedly pick the cheapest outgoing edge of every
# component and merge components until one remains.
lowerCamelCase__ = graph.num_vertices
lowerCamelCase__ = Graph.UnionFind()
lowerCamelCase__ = []
while num_components > 1:
lowerCamelCase__ = {}
for vertex in graph.get_vertices():
# -1 marks "no cheapest edge found yet" for this component.
lowerCamelCase__ = -1
lowerCamelCase__ = graph.get_edges()
for edge in edges:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = edge
lowerCamelCase__ = union_find.find(_lowerCamelCase )
lowerCamelCase__ = union_find.find(_lowerCamelCase )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase__ = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase__ = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = cheap_edge[vertex]
if union_find.find(_lowerCamelCase ) != union_find.find(_lowerCamelCase ):
union_find.union(_lowerCamelCase ,_lowerCamelCase )
mst_edges.append(cheap_edge[vertex] )
lowerCamelCase__ = num_components - 1
lowerCamelCase__ = Graph.build(edges=_lowerCamelCase )
return mst
| 50 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCamelCase_ ( unittest.TestCase ):
"""Fixture/config holder for the DPT image-processor tests below
(originally ``DPTImageProcessingTester``): stores sizing/normalization
parameters and exposes them as the image-processor kwargs dict.

NOTE(review): ``__init__`` declares the parameter name ``_lowerCamelCase``
ten times — duplicate argument names are a SyntaxError in Python — while
the body reads the original names (``parent``, ``batch_size``, ``size``,
...), and the ``__magic_name__`` assignments shadow what were
``self.<attr> = <param>`` assignments. This class does not even parse as
written.
"""
def __init__( self : str , _lowerCamelCase : str , _lowerCamelCase : Optional[Any]=7 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[Any]=18 , _lowerCamelCase : Union[str, Any]=30 , _lowerCamelCase : Tuple=4_00 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=True , _lowerCamelCase : Dict=[0.5, 0.5, 0.5] , _lowerCamelCase : Dict=[0.5, 0.5, 0.5] , ) -> Dict:
# Default target size when the caller does not supply one.
__magic_name__ = size if size is not None else {"height": 18, "width": 18}
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = num_channels
__magic_name__ = image_size
__magic_name__ = min_resolution
__magic_name__ = max_resolution
__magic_name__ = do_resize
__magic_name__ = size
__magic_name__ = do_normalize
__magic_name__ = image_mean
__magic_name__ = image_std
def __A ( self : int ) -> List[str]:
# prepare_image_processor_dict: kwargs consumed by DPTImageProcessor(**...).
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase_ ( A , unittest.TestCase ):
"""DPTImageProcessor tests: property presence, size handling in
``from_dict``, and batched/unbatched preprocessing of PIL, numpy, and
torch inputs, each checked for the expected NCHW output shape.

NOTE(review): all test methods are named ``__A`` (only the last definition
survives in Python) and ``__magic_name__`` shadows distinct locals
(``image_processing``, ``image_inputs``, ``encoded_images``), so these
tests fail at runtime as written — the identifiers were mangled.
"""
UpperCAmelCase__ : Union[str, Any] = DPTImageProcessor if is_vision_available() else None
def __A ( self : Dict ) -> Any:
# setUp: build the shared fixture holder.
__magic_name__ = DPTImageProcessingTester(self )
@property
def __A ( self : str ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Tuple ) -> List[str]:
# The processor must expose every configuration attribute.
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(_lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_lowerCamelCase , "size" ) )
def __A ( self : List[str] ) -> List[Any]:
# from_dict: default size from the dict, int `size` kwarg expands to a square.
__magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
__magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __A ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __A ( self : Dict ) -> Optional[Any]:
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __A ( self : Optional[int] ) -> Dict:
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 664 | 0 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
# Module-level constants for the fairseq -> UniSpeechSat checkpoint converter.
# NOTE(review): the same mangled name `UpperCAmelCase_` is bound three times
# (it was originally `logger`, `MAPPING`, `TOP_LEVEL_KEYS`); the functions
# below read the original names, so this script raises NameError as written.
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
# fairseq state-dict key (or prefix) -> HF UniSpeechSat module path; "*" is a
# placeholder later replaced by the encoder layer index.
UpperCAmelCase_ : str = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
# Mapped targets that live at the top level of the HF model rather than under
# the `unispeech_sat.` prefix.
UpperCAmelCase_ : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def UpperCAmelCase_ ( A , A , A , A , A ):
'''Originally ``set_recursively(hf_pointer, key, value, full_name,
weight_type)``: walk ``key`` dot-by-dot into the HF module tree, validate
that ``value``'s shape matches the target tensor, then copy ``value`` into
the selected slot (weight / weight_g / weight_v / bias / raw tensor).

NOTE(review): the signature declares the parameter ``A`` five times —
duplicate argument names are a SyntaxError — and the ``_a`` assignments
shadow what were ``hf_pointer``/``hf_shape`` rebindings, so this function
does not even parse as written.
'''
for attribute in key.split('.' ):
_a : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_ )
if weight_type is not None:
_a : str = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape
else:
_a : Union[str, Any] = hf_pointer.shape
# Fail loudly on shape mismatch rather than silently corrupting weights.
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_a : Any = value
elif weight_type == "weight_g":
_a : Union[str, Any] = value
elif weight_type == "weight_v":
_a : Union[str, Any] = value
elif weight_type == "bias":
_a : Optional[int] = value
else:
_a : int = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCAmelCase_ ( A , A ):
'''Originally ``recursively_load_weights(fairseq_model, hf_model)``: copy
every tensor from the fairseq state dict into the HF UniSpeechSat model —
conv feature-extractor layers via ``load_conv_layer``, everything else via
the MAPPING table and ``set_recursively`` — and warn about leftovers.

NOTE(review): duplicate ``A`` parameters (SyntaxError) and mangled ``_a``
locals (originally ``unused_weights``, ``fairseq_dict``,
``feature_extractor``, ``is_used``, ``layer_index``, ``weight_type``);
the function fails as written.
'''
_a : Dict = []
_a : int = fairseq_model.state_dict()
_a : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
_a : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
_a : Dict = True
else:
for key, mapped_key in MAPPING.items():
# Top-level keys are not nested under the `unispeech_sat.` prefix.
_a : Union[str, Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
_a : int = True
if "*" in mapped_key:
# Substitute the encoder layer index into the mapped path.
_a : List[str] = name.split(lowerCamelCase_ )[0].split('.' )[-2]
_a : Any = mapped_key.replace('*' , lowerCamelCase_ )
if "weight_g" in name:
_a : Dict = 'weight_g'
elif "weight_v" in name:
_a : str = 'weight_v'
elif "bias" in name:
_a : List[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_a : Tuple = 'weight'
else:
_a : Optional[int] = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
continue
if not is_used:
unused_weights.append(lowerCamelCase_ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def UpperCAmelCase_ ( A , A , A , A , A ):
'''Originally ``load_conv_layer(full_name, value, feature_extractor,
unused_weights, use_group_norm)``: copy one fairseq conv feature-extractor
tensor (conv weight/bias or layer-norm weight/bias, selected by the
``type_id`` encoded in the key) into the HF feature extractor; unmatched
names are collected into ``unused_weights``.

NOTE(review): duplicate ``A`` parameters (SyntaxError) and mangled ``_a``
locals (originally ``name``, ``items``, ``layer_id``, ``type_id``); the
function fails as written.
'''
_a : Optional[int] = full_name.split('conv_layers.' )[-1]
_a : List[str] = name.split('.' )
_a : List[str] = int(items[0] )
_a : Tuple = int(items[1] )
# type_id 0 -> convolution tensors; type_id 2 -> layer-norm tensors (only
# valid for layer-norm extract mode, or layer 0 under group norm).
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
_a : Union[str, Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
_a : Dict = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
_a : Tuple = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
_a : Union[str, Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase_ )
@torch.no_grad()
def UpperCAmelCase_ ( A , A , A=None , A=None , A=True ):
'''Originally ``convert_unispeech_sat_checkpoint(checkpoint_path,
pytorch_dump_folder_path, config_path=None, dict_path=None,
is_finetuned=True)``: build a UniSpeechSat config (from file or defaults),
instantiate the CTC or pre-training model, load the fairseq checkpoint,
copy its weights over, and save the HF model.

NOTE(review): duplicate ``A`` parameters (SyntaxError) plus mangled ``_a``
locals (originally ``config``, ``dict_path`` handling, ``hf_wavavec``,
``model``); also ``torch.floataa`` elsewhere suggests literal mangling.
This function fails as written.
'''
if config_path is not None:
_a : Optional[Any] = UniSpeechSatConfig.from_pretrained(lowerCamelCase_ )
else:
_a : List[str] = UniSpeechSatConfig()
_a : int = ''
if is_finetuned:
_a : Optional[Any] = UniSpeechSatForCTC(lowerCamelCase_ )
else:
_a : Tuple = UniSpeechSatForPreTraining(lowerCamelCase_ )
# fairseq needs the data dir of the dict to rebuild the task.
_a , _a , _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
_a : Optional[int] = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ )
hf_wavavec.save_pretrained(lowerCamelCase_ )
# CLI entry point: parse the conversion arguments and run the converter.
# NOTE(review): the parse result is bound to the mangled name `UpperCAmelCase_`
# while the call below reads `args.*`, so this guard raises NameError as
# written.
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 120 |
'''simple docstring'''
import numpy
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Union[str, Any] , _lowerCamelCase : numpy.ndarray , _lowerCamelCase : numpy.ndarray ) -> None:
"""Two-hidden-layer network setup: random weights for input->4, 4->3 and
3->1 layers, plus storage for targets and predictions.

NOTE(review): the signature declares ``_lowerCamelCase`` twice — duplicate
argument names are a SyntaxError — and the ``__magic_name__`` assignments
shadow what were ``self.input_array``, ``self.*_weights``,
``self.output_array`` and ``self.predicted_output``; the method does not
even parse as written.
"""
__magic_name__ = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
__magic_name__ = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
__magic_name__ = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
__magic_name__ = numpy.random.rand(3 , 1 )
# Real output values provided.
__magic_name__ = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
__magic_name__ = numpy.zeros(output_array.shape )
def __A ( self : int ) -> numpy.ndarray:
"""Forward pass: sigmoid(input @ W1) -> sigmoid(h1 @ W2) -> sigmoid(h2 @ W3).

NOTE(review): each ``__magic_name__`` assignment shadows what was a
``self.layer_between_...`` attribute assignment (the names the later
lines and ``back_propagation`` read), so this fails at runtime as written.
"""
__magic_name__ = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
__magic_name__ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
__magic_name__ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def __A ( self : Dict ) -> None:
__magic_name__ = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
__magic_name__ = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
__magic_name__ = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def __A ( self : Optional[int] , _lowerCamelCase : numpy.ndarray , _lowerCamelCase : int , _lowerCamelCase : bool ) -> None:
for iteration in range(1 , iterations + 1 ):
__magic_name__ = self.feedforward()
self.back_propagation()
if give_loss:
__magic_name__ = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'Iteration {iteration} Loss: {loss}' )
def __A ( self : Tuple , _lowerCamelCase : numpy.ndarray ) -> int:
__magic_name__ = input_arr
__magic_name__ = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
__magic_name__ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
__magic_name__ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Return the element-wise sigmoid activation 1 / (1 + e^-value).

    Named `sigmoid` because the network methods above call it under that name;
    the mangled original bound the parameter to a placeholder and read `value`.
    """
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Return the sigmoid derivative expressed in terms of the sigmoid output:
    s * (1 - s), where `value` is already a sigmoid activation."""
    return value * (1 - value)
def example() -> int:
    """Train the network on the 3-bit XOR-style truth table and classify (1, 1, 1).

    Returns:
        The predicted class (0 or 1) for the input vector [1, 1, 1].
    """
    # `numpy.floataa` in the mangled original was `numpy.float64`.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
| 664 | 0 |
"""simple docstring"""
from string import ascii_uppercase
# Letter -> index (A=0 .. Z=25) and index -> letter lookup tables.
# The mangled original assigned both to the same placeholder name; the cipher
# functions below look letters up via `dicta`.
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))
def generate_key(message: str, key: str) -> str:
    """Repeat `key` cyclically until it is as long as `message`.

    >>> generate_key("THE GERMAN ATTACK", "SECRET")
    'SECRETSECRETSECRE'
    """
    x = len(message)
    i = 0
    while True:
        # Wrap the read index back to the start of the growing key.
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
def cipher_text(message: str, key_new: str) -> str:
    """Encrypt `message` with the stretched key; spaces pass through unchanged.

    Each letter maps to (message_index - key_index) mod 26 — note this variant
    subtracts on encryption, matching the addition in `original_text`.
    """
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (ascii_uppercase.index(letter) - ascii_uppercase.index(key_new[i])) % 26
            # Only consume a key character for actual letters.
            i += 1
            encrypted += ascii_uppercase[x]
    return encrypted
def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt `cipher_text` with the stretched key; spaces pass through unchanged.

    Inverse of `cipher_text`: each letter maps to (cipher_index + key_index) mod 26.
    """
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            # The `+ 26` is redundant under mod 26 but kept from the original formula.
            x = (ascii_uppercase.index(letter) + ascii_uppercase.index(key_new[i]) + 26) % 26
            i += 1
            or_txt += ascii_uppercase[x]
    return or_txt
def main() -> None:
    """Demonstrate encryption and decryption of a sample message.

    Named `main` because the module guard below calls `main()`.
    """
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
'''simple docstring'''
import torch
from transformers import AutoModel
class UpperCamelCase_(torch.nn.Module):
    """FSNER few-shot NER scorer: a BERT encoder plus cosine-similarity attention.

    NOTE(review): method names `VectorSum`/`Atten` and the `W_query` parameter were
    reconstructed from context (only `BERT` and `forward` are called here) — confirm
    against the upstream FSNER implementation.
    """

    def __init__(self, pretrained_model_name_or_path: str = "sayef/fsner-bert-base-uncased") -> None:
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        # Cosine similarity over the last (hidden) axis, then softmax over supports.
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        """Encode tokenized inputs and return the last hidden state."""
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        """Sum embeddings over axis 2, keeping the dimension."""
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q, S, T=1):
        """Softmax over temperature-scaled cosine similarity between q and S."""
        return self.softmax(T * self.cos(q, S))

    def forward(self, W_query, W_supports):
        """Score candidate start/end token positions of each query against its supports.

        Returns:
            (p_starts, p_ends): per-query softmax distributions over query tokens.
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        # Strip bookkeeping keys before handing the batch to BERT.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            # Offset of this query's support block within S.
            s = 0 if i != 0 else 0
            s = support_sizes[i - 1] if i != 0 else 0
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
| 664 | 0 |
# Undirected demo graph as an adjacency list; the __main__ guard below
# references it as `demo_graph` (the mangled original bound a placeholder name).
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return the shortest path from `start` to `goal` as a list of nodes.

    Breadth-first search over an adjacency-list `graph`; returns [] when no
    path exists. Named per the __main__ guard's call site.
    """
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest path from `start` to `target`.

    Returns -1 when either endpoint is missing (or the graph is empty) and 0
    when start == target.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            # Keep the smallest distance seen for the target.
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
    # Manual smoke test: path and distance from "G" to "D" in the demo graph above.
    print(bfs_shortest_path(demo_graph, 'G', 'D'))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, 'G', 'D'))  # returns 4
| 311 |
"""Deprecated re-export kept only for backward-compatible imports."""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401

# Warn once at import time, pointing callers at the new import location.
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
| 664 | 0 |
import contextlib
import os
import sqlite3

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    """Assert the dataset loaded from SQL has the fixture's shape and feature dtypes."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, tmp_path, sqlite_path, set_sqlalchemy_silence_uber_warning):
    """Reading with keep_in_memory=True must not grow Arrow memory; False may.

    NOTE(review): the fixture name `set_sqlalchemy_silence_uber_warning` was
    reconstructed (the mangled signature had four identical parameter names) — confirm.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, tmp_path, sqlite_path, set_sqlalchemy_silence_uber_warning):
    """Explicit `features` should override the defaults inferred from the table.

    NOTE(review): fixture name `set_sqlalchemy_silence_uber_warning` reconstructed — confirm.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    """Yield every row of the `dataset` table in the SQLite file at `sqlite_path`."""
    # `contextlib.closing` guarantees the connection is closed even on error.
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Round-trip: writing a dataset back to SQLite must preserve every row.

    NOTE(review): fixture name `set_sqlalchemy_silence_uber_warning` reconstructed — confirm.
    """
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Same round-trip as above but written with num_proc=2.

    NOTE(review): fixture name `set_sqlalchemy_silence_uber_warning` reconstructed — confirm.
    """
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """num_proc=0 is invalid and must raise ValueError before any write happens.

    NOTE(review): fixture name `set_sqlalchemy_silence_uber_warning` reconstructed — confirm.
    """
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    """Convert a T5X checkpoint into a Flax `transformers` checkpoint on disk.

    Supports vanilla T5 and LongT5 with "local" or "transient-global" encoder
    attention. Parameter names and order follow the __main__ guard's call below.

    NOTE(review): the mangled original dropped the assignment targets of every
    weight copy; the flax parameter-dict keys below were reconstructed from the
    upstream T5X->Flax converter and should be verified against a reference
    checkpoint before use.

    Args:
        tax_checkpoint_path: Path to the source T5X checkpoint.
        config_name: Hub name/path of the matching T5/LongT5 config.
        flax_dump_folder_path: Output directory for the converted Flax model.

    Raises:
        ValueError: If the config is neither T5 nor a supported LongT5 variant.
    """
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config)
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path)

    # v1.1 / LongT5 checkpoints split the MLP input projection into wi_0 / wi_1.
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]

    # Was a plain `if` in the original, which made the chain below raise for
    # plain T5 configs; `elif` keeps the valid t5 branch out of the error path.
    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        tax_attention_key = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm (transient-global attention only)
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        tax_attention_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            tax_mlp_wi_a = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_b = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
        tax_mlp_wo = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = tax_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = tax_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = tax_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = tax_attention_value
        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = tax_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][
                "weight"
            ] = tax_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_a
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_b
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi
        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = tax_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0: relative position bias lives in the first block.
    tax_encoder_rel_embedding = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = tax_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = tax_encoder_global_rel_embedding

    # Assigning the final encoder norm.
    tax_encoder_norm = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = tax_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        tax_attention_key = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        tax_enc_dec_attention_key = tax_enc_dec_attention_module["key"]["kernel"]
        tax_enc_dec_attention_out = tax_enc_dec_attention_module["out"]["kernel"]
        tax_enc_dec_attention_query = tax_enc_dec_attention_module["query"]["kernel"]
        tax_enc_dec_attention_value = tax_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        tax_cross_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            tax_mlp_wi_a = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_b = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
        tax_mlp_wo = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = tax_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = tax_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = tax_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = tax_attention_value
        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = tax_pre_attention_layer_norm
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = tax_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = tax_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = tax_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = tax_enc_dec_attention_value
        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = tax_cross_layer_norm
        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_a
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_b
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi
        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = tax_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    tax_decoder_norm = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = tax_decoder_norm

    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = tax_decoder_rel_embedding

    # Token Embeddings
    tax_token_embeddings = tax_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = tax_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
__magic_name__ : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
__magic_name__ : Optional[int] =parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 664 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE__(ProcessorMixin):
    """Combine a CLAP feature extractor and a Roberta tokenizer into one processor.

    NOTE(review): the two class attributes below were both mangled to the same
    name; they are restored to the names the ProcessorMixin contract expects — confirm.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or extract features from `audios`.

        Returns a BatchEncoding with tokenizer outputs, audio `input_features`,
        or both merged when both inputs are given.

        Raises:
            ValueError: If neither `text` nor `audios` is provided.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # Merge the audio features into the tokenizer encoding.
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and feature-extractor input names, de-duplicated in order."""
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 297 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCamelCase_(unittest.TestCase, ToolTesterMixin):
    """Integration tests for the text-to-speech tool.

    NOTE(review): the mixin base was mangled to a bare `A`; restored to the
    `ToolTesterMixin` imported above — confirm.
    """

    def setUp(self) -> None:
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self) -> None:
        # SpeechT5 isn't deterministic, so pin the seed before generating.
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self) -> None:
        # SpeechT5 isn't deterministic, so pin the seed before generating.
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 664 | 0 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
def __init__( self : Any , snake_case : Any , snake_case : List[Any]=1_3 , snake_case : List[Any]=3_0 , snake_case : Optional[Any]=2 , snake_case : int=3 , snake_case : Tuple=True , snake_case : Union[str, Any]=True , snake_case : Tuple=3_2 , snake_case : int=2 , snake_case : Optional[Any]=4 , snake_case : List[str]=3_7 , snake_case : Tuple="gelu" , snake_case : List[Any]=0.1 , snake_case : Dict=0.1 , snake_case : Optional[Any]=1_0 , snake_case : List[str]=0.02 , snake_case : Tuple=3 , snake_case : Optional[Any]=0.6 , snake_case : str=None , ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Tuple = parent
UpperCamelCase_ : Optional[Any] = batch_size
UpperCamelCase_ : Any = image_size
UpperCamelCase_ : str = patch_size
UpperCamelCase_ : List[Any] = num_channels
UpperCamelCase_ : List[Any] = is_training
UpperCamelCase_ : Union[str, Any] = use_labels
UpperCamelCase_ : Optional[Any] = hidden_size
UpperCamelCase_ : List[Any] = num_hidden_layers
UpperCamelCase_ : Tuple = num_attention_heads
UpperCamelCase_ : Tuple = intermediate_size
UpperCamelCase_ : Dict = hidden_act
UpperCamelCase_ : Optional[Any] = hidden_dropout_prob
UpperCamelCase_ : Any = attention_probs_dropout_prob
UpperCamelCase_ : Optional[int] = type_sequence_label_size
UpperCamelCase_ : List[str] = initializer_range
UpperCamelCase_ : List[str] = mask_ratio
UpperCamelCase_ : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase_ : Optional[int] = (image_size // patch_size) ** 2
UpperCamelCase_ : Any = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ : Tuple = None
if self.use_labels:
UpperCamelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ : int = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : int , snake_case : Any , snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : List[str] = TFViTMAEModel(config=_lowerCamelCase )
UpperCamelCase_ : Dict = model(_lowerCamelCase , training=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : Dict , snake_case : Optional[int] , snake_case : Optional[Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = TFViTMAEForPreTraining(_lowerCamelCase )
UpperCamelCase_ : Dict = model(_lowerCamelCase , training=_lowerCamelCase )
# expected sequence length = num_patches
UpperCamelCase_ : Tuple = (self.image_size // self.patch_size) ** 2
UpperCamelCase_ : List[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase_ : Optional[int] = 1
UpperCamelCase_ : Optional[int] = TFViTMAEForPreTraining(_lowerCamelCase )
UpperCamelCase_ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_ : Any = model(_lowerCamelCase , training=_lowerCamelCase )
UpperCamelCase_ : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int:
"""simple docstring"""
UpperCamelCase_ : Any = self.prepare_config_and_inputs()
((UpperCamelCase_), (UpperCamelCase_), (UpperCamelCase_)) : Optional[int] = config_and_inputs
UpperCamelCase_ : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class _lowercase ( snake_case_ , snake_case_ , unittest.TestCase ):
lowercase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Dict = TFViTMAEModelTester(self )
UpperCamelCase_ : Any = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : List[Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase_ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , tf.keras.layers.Layer ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Optional[int] = model_class(_lowerCamelCase )
UpperCamelCase_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ : Any = [*signature.parameters.keys()]
UpperCamelCase_ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase_, UpperCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ : Any = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase_ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase_ : Optional[int] = model_class(_lowerCamelCase )
UpperCamelCase_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_ : Tuple = model(_lowerCamelCase , noise=_lowerCamelCase )
UpperCamelCase_ : List[Any] = copy.deepcopy(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
UpperCamelCase_ : Optional[Any] = model(**_lowerCamelCase , noise=_lowerCamelCase )
UpperCamelCase_ : Tuple = outputs_dict[0].numpy()
UpperCamelCase_ : Any = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]:
    """Verify the model accepts plain numpy inputs identically to TF tensors.

    A fixed `noise` array is injected because ViTMAE's masking is random.

    NOTE(review): `_lowerCamelCase` is undefined in this file — renaming
    artifact; confirm the intended arguments upstream.
    """
    np.random.seed(2 )
    UpperCamelCase_, UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
    UpperCamelCase_ : int = int((config.image_size // config.patch_size) ** 2 )
    UpperCamelCase_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
    def prepare_numpy_arrays(snake_case : Optional[Any] ):
        # Convert every tensor in the inputs dict to a numpy array.
        UpperCamelCase_ : List[Any] = {}
        for k, v in inputs_dict.items():
            if tf.is_tensor(_lowerCamelCase ):
                UpperCamelCase_ : Optional[int] = v.numpy()
            else:
                UpperCamelCase_ : Tuple = np.array(_lowerCamelCase )
        return inputs_np_dict
    for model_class in self.all_model_classes:
        UpperCamelCase_ : Union[str, Any] = model_class(_lowerCamelCase )
        UpperCamelCase_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
        UpperCamelCase_ : Optional[int] = prepare_numpy_arrays(_lowerCamelCase )
        # Run with TF-tensor inputs and numpy inputs; outputs must match.
        UpperCamelCase_ : Union[str, Any] = model(_lowerCamelCase , noise=_lowerCamelCase )
        UpperCamelCase_ : Optional[int] = model(**_lowerCamelCase , noise=_lowerCamelCase )
        self.assert_outputs_same(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : str , snake_case : Union[str, Any] , snake_case : Optional[Any] ) -> Union[str, Any]:
    """Override of the PT/TF equivalence check that injects a shared noise input.

    Both frameworks must see the same `noise` so the random masking is
    identical and outputs are comparable.

    NOTE(review): duplicated `snake_case` parameter names and the undefined
    `_lowerCamelCase` are renaming artifacts; confirm upstream signature
    (tf_model, pt_model, inputs_dict).
    """
    np.random.seed(2 )
    UpperCamelCase_ : List[str] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
    UpperCamelCase_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
    UpperCamelCase_ : List[Any] = tf.constant(_lowerCamelCase )
    # Add `noise` argument.
    # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
    UpperCamelCase_ : Optional[Any] = tf_noise
    super().check_pt_tf_models(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple:
    """Round-trip every keras-serializable MainLayer through tf.keras save/load.

    Discovers `*MainLayer` classes matching each model class, wraps each in a
    functional keras model, saves to HDF5, reloads with `custom_objects`, and
    checks the reloaded model reproduces the original outputs.

    NOTE(review): `_lowerCamelCase` occurrences are renaming artifacts for
    various locals (config, module, names); confirm upstream.
    """
    np.random.seed(2 )
    UpperCamelCase_, UpperCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
    # Collect all keras-serializable MainLayer classes paired to model classes.
    UpperCamelCase_ : Dict = {
        module_member
        for model_class in self.all_model_classes
        for module in (import_module(model_class.__module__ ),)
        for module_member_name in dir(_lowerCamelCase )
        if module_member_name.endswith('MainLayer' )
        # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
        and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
        for module_member in (getattr(_lowerCamelCase , _lowerCamelCase ),)
        if isinstance(_lowerCamelCase , _lowerCamelCase )
        and tf.keras.layers.Layer in module_member.__bases__
        and getattr(_lowerCamelCase , '_keras_serializable' , _lowerCamelCase )
    }
    # Deterministic noise input (ViTMAE masking is otherwise random).
    UpperCamelCase_ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
    UpperCamelCase_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
    UpperCamelCase_ : Dict = tf.convert_to_tensor(_lowerCamelCase )
    inputs_dict.update({'noise': noise} )
    for main_layer_class in tf_main_layer_classes:
        UpperCamelCase_ : Optional[int] = main_layer_class(_lowerCamelCase )
        # Symbolic inputs mirroring the concrete input tensors.
        UpperCamelCase_ : Any = {
            name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
        }
        UpperCamelCase_ : Union[str, Any] = tf.keras.Model(_lowerCamelCase , outputs=main_layer(_lowerCamelCase ) )
        UpperCamelCase_ : str = model(_lowerCamelCase )
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase_ : str = os.path.join(_lowerCamelCase , 'keras_model.h5' )
            model.save(_lowerCamelCase )
            UpperCamelCase_ : str = tf.keras.models.load_model(
                _lowerCamelCase , custom_objects={main_layer_class.__name__: main_layer_class} )
            assert isinstance(_lowerCamelCase , tf.keras.Model )
            UpperCamelCase_ : int = model(_lowerCamelCase )
            self.assert_outputs_same(_lowerCamelCase , _lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Dict:
    """Check save_pretrained/from_pretrained round-trips model outputs.

    Uses a fixed `noise` input so the random masking is identical before and
    after reloading; outputs are compared with a 1e-5 tolerance.

    NOTE(review): `_lowerCamelCase` occurrences and the repeated `= 0`
    assignments are renaming artifacts (upstream selects an output index per
    model class); confirm upstream.
    """
    np.random.seed(2 )
    UpperCamelCase_, UpperCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
    UpperCamelCase_ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
    UpperCamelCase_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
    for model_class in self.all_model_classes:
        UpperCamelCase_ : Tuple = model_class(_lowerCamelCase )
        UpperCamelCase_ : Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
        UpperCamelCase_ : List[str] = model(_lowerCamelCase , noise=_lowerCamelCase )
        # The base model and the pretraining head expose different outputs.
        if model_class.__name__ == "TFViTMAEModel":
            UpperCamelCase_ : Dict = outputs.last_hidden_state.numpy()
            UpperCamelCase_ : Any = 0
        else:
            UpperCamelCase_ : List[Any] = outputs.logits.numpy()
            UpperCamelCase_ : Optional[int] = 0
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(_lowerCamelCase , saved_model=_lowerCamelCase )
            UpperCamelCase_ : int = model_class.from_pretrained(_lowerCamelCase )
            UpperCamelCase_ : int = model(_lowerCamelCase , noise=_lowerCamelCase )
            if model_class.__name__ == "TFViTMAEModel":
                UpperCamelCase_ : List[Any] = after_outputs['last_hidden_state'].numpy()
                UpperCamelCase_ : List[str] = 0
            else:
                UpperCamelCase_ : Union[str, Any] = after_outputs['logits'].numpy()
                UpperCamelCase_ : List[Any] = 0
            # Maximum absolute difference between pre- and post-reload outputs.
            UpperCamelCase_ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_lowerCamelCase , 1e-5 )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> int:
    """Check `get_config`/`from_config` round-trips produce an equivalent model.

    The config must be JSON-serializable (a keras requirement); weights are
    copied into the rebuilt model and outputs compared under fixed noise.

    NOTE(review): `_lowerCamelCase` occurrences are renaming artifacts;
    confirm upstream.
    """
    np.random.seed(2 )
    UpperCamelCase_, UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
    UpperCamelCase_ : str = int((config.image_size // config.patch_size) ** 2 )
    UpperCamelCase_ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
    for model_class in self.all_model_classes:
        UpperCamelCase_ : str = model_class(_lowerCamelCase )
        UpperCamelCase_ : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
        UpperCamelCase_ : int = model(_lowerCamelCase , noise=_lowerCamelCase )
        UpperCamelCase_ : List[str] = model.get_config()
        # make sure that returned config is jsonifiable, which is required by keras
        json.dumps(_lowerCamelCase )
        UpperCamelCase_ : Any = model_class.from_config(model.get_config() )
        # make sure it also accepts a normal config
        UpperCamelCase_ : str = model_class.from_config(model.config )
        UpperCamelCase_ : List[str] = new_model(_lowerCamelCase ) # Build model
        new_model.set_weights(model.get_weights() )
        UpperCamelCase_ : Dict = new_model(_lowerCamelCase , noise=_lowerCamelCase )
        self.assert_outputs_same(_lowerCamelCase , _lowerCamelCase )
@unittest.skip(
    reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
    """Intentionally skipped: non-deterministic masking breaks this base-class test."""
    pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
    """Intentionally skipped: non-deterministic masking breaks this base-class test."""
    pass
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
    """Smoke test: a pretrained checkpoint loads and yields a non-None model.

    NOTE(review): the checkpoint name ('google/vit-base-patch16-224') looks
    like a plain ViT id rather than a ViTMAE one — confirm upstream.
    """
    UpperCamelCase_ : Optional[int] = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
    self.assertIsNotNone(_lowerCamelCase )
def __lowercase ( ):
    """Load and return the fixed COCO sample image used by the integration tests."""
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
@require_tf
@require_vision
class _lowercase ( unittest.TestCase ):
    """Integration test: run a pretrained TF ViTMAE on a real image and pin logits."""

    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]:
        """Image processor for the checkpoint, or None when vision deps are absent."""
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None

    @slow
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
        """Forward an image through the pretrained model and check shape + logit slice.

        NOTE(review): `_lowerCamelCase` occurrences are renaming artifacts for
        the prepared image/noise/expected tensors; confirm upstream.
        """
        # Fixed seed so the shared PT/TF noise vector is reproducible.
        np.random.seed(2 )
        UpperCamelCase_ : int = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
        UpperCamelCase_ : Tuple = self.default_image_processor
        UpperCamelCase_ : Optional[int] = prepare_img()
        UpperCamelCase_ : Optional[int] = image_processor(images=_lowerCamelCase , return_tensors='tf' )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        UpperCamelCase_ : Optional[int] = ViTMAEConfig()
        UpperCamelCase_ : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        UpperCamelCase_ : Tuple = np.random.uniform(size=(1, num_patches) )
        # forward pass
        UpperCamelCase_ : Optional[Any] = model(**_lowerCamelCase , noise=_lowerCamelCase )
        # verify the logits
        UpperCamelCase_ : Optional[Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
        self.assertEqual(outputs.logits.shape , _lowerCamelCase )
        UpperCamelCase_ : Optional[int] = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
| 417 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# NOTE(review): the three constants below were all renamed to `__magic_name__`,
# so each assignment clobbers the previous one and only the last value (256)
# survives. Upstream these are presumably NON_ALPHA / MIN_NUM_TOKENS /
# NUM_PERM, which later functions still reference by those names — confirm.
__magic_name__ : Dict =re.compile('[^A-Za-z_0-9]')  # token-boundary regex
# parameters used in DuplicationIndex
__magic_name__ : int =10  # minimum token count required to hash a document
__magic_name__ : Union[str, Any] =2_56  # number of MinHash permutations
def __snake_case ( lowerCamelCase_ : List[str] ):
    """Compute a MinHash over a list of tokens.

    Returns ``None`` when the document has fewer than ``MIN_NUM_TOKENS``
    tokens (too short to fingerprint reliably).

    Fix: the original constructed ``MinHash(num_perm=<token list>)`` —
    passing the tokens as the permutation count. ``num_perm`` must be the
    module-level ``NUM_PERM`` constant.

    NOTE(review): ``MIN_NUM_TOKENS``/``NUM_PERM`` are not defined under those
    names in this file (the constants were renamed to ``__magic_name__``);
    confirm against the upstream module.
    """
    if len(lowerCamelCase_ ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(lowerCamelCase_ ):
        min_hash.update(token.encode() )
    return min_hash
def __snake_case ( lowerCamelCase_ : str ):
    """Split the input on non-alphanumeric boundaries and return the set of
    non-empty tokens."""
    tokens = set()
    for tok in NON_ALPHA.split(lowerCamelCase_ ):
        if tok.strip():
            tokens.add(tok )
    return tokens
class UpperCamelCase_ :
    """Incremental MinHash-LSH index that groups near-duplicate documents into
    clusters keyed by the first-seen member of each cluster.

    NOTE(review): several right-hand names in the bodies below
    (``duplication_jaccard_threshold``, ``code_key``, ``close_duplicates``,
    ``duplicate_clusters``, ``cluster``) are undefined in this file — an
    artifact of assignments being renamed to ``__magic_name__``; confirm the
    intended data flow against the upstream module.
    """

    def __init__( self : int , *,
    _lowerCamelCase : float = 0.85 , ) -> Optional[Any]:
        # Jaccard-similarity threshold above which two documents are duplicates.
        __magic_name__ = duplication_jaccard_threshold
        __magic_name__ = NUM_PERM
        # LSH index used to find candidate near-duplicates quickly.
        __magic_name__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        # base key -> set of duplicate keys (NOTE(review): upstream default
        # factory is `set`, not the threshold float passed here — confirm).
        __magic_name__ = defaultdict(_lowerCamelCase )

    def __A ( self : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : MinHash ) -> None:
        """Insert one (key, MinHash) pair and attach it to an existing cluster
        when the LSH query finds near-duplicates."""
        __magic_name__ = self._index.query(_lowerCamelCase )
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}' )
            return
        self._index.insert(_lowerCamelCase , _lowerCamelCase )
        if len(_lowerCamelCase ) > 0:
            # Prefer extending a cluster whose base we already track.
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(_lowerCamelCase )
                    break
            else:
                # No known base among the hits: start a cluster at the first hit.
                self._duplicate_clusters[close_duplicates[0]].add(_lowerCamelCase )

    def __A ( self : Union[str, Any] ) -> List[List[Dict]]:
        """Return all clusters, each as a list of {base_index, repo_name, path}
        dicts (the base element first)."""
        __magic_name__ = []
        for base, duplicates in self._duplicate_clusters.items():
            __magic_name__ = [base] + list(_lowerCamelCase )
            # reformat the cluster to be a list of dict
            __magic_name__ = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(_lowerCamelCase )
        return duplicate_clusters

    def __A ( self : Tuple , _lowerCamelCase : Tuple ) -> None:
        """Serialize the duplicate clusters to the given path as JSON."""
        __magic_name__ = self.get_duplicate_clusters()
        with open(_lowerCamelCase , "w" ) as f:
            json.dump(_lowerCamelCase , _lowerCamelCase )
def __snake_case ( lowerCamelCase_ : List[Any] ):
    """Map one ``(index, row)`` pair to ``((index, repo_name, path), MinHash)``,
    or ``None`` implicitly when the document is too short to hash.

    Fix: the original unpacked an undefined name ``element`` into a single
    repeated target (``__magic_name__ , __magic_name__ = element``); the
    argument itself is the pair, and the two halves must be distinct names.

    NOTE(review): ``get_min_hash`` and ``NON_ALPHA`` are not defined under
    those names in this file (renaming artifact); confirm upstream.
    """
    index, data = lowerCamelCase_
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def __snake_case ( lowerCamelCase_ : Type[Dataset] ):
    """Yield ``((index, repo_name, path), MinHash)`` pairs for a dataset,
    computing hashes in a multiprocessing pool fed by a threaded prefetcher.

    NOTE(review): ``_compute_min_hash`` is not defined under that name in this
    file (renaming artifact); confirm upstream.
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(lowerCamelCase_ , max_queue_size=1_0000 ) , chunksize=100 , ):
            # Rows too short to hash come back as None and are dropped.
            if data is not None:
                yield data
def __snake_case ( lowerCamelCase_ : Type[Dataset] , jaccard_threshold : float ):
    """Build duplicate clusters for a dataset via the MinHash-LSH index.

    Fixes: both parameters were declared with the same name (a SyntaxError in
    Python) — the second is the Jaccard threshold; and ``di.add`` was invoked
    with the function arguments instead of the per-row ``(filename, min_hash)``
    pair produced by the iterator.

    NOTE(review): ``DuplicationIndex`` and ``minhash_iter`` are not defined
    under those names in this file (renaming artifact); confirm upstream.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCamelCase_ ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def __snake_case ( code_a : str , code_b : str ):
    """Return the Jaccard similarity of the token sets of two code strings.

    Fixes: both parameters were declared with the same name (a SyntaxError),
    and both token sets were assigned to the same variable, which would make
    the ratio identically 1.0 — the two documents must be tokenized into
    distinct sets.

    NOTE(review): ``get_tokens`` is not defined under that name in this file
    (renaming artifact); confirm upstream.
    """
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
__magic_name__ : List[str] =None
def __snake_case ( lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] ):
    """Reduce one duplicate cluster to its 'extremes': a minimal subset such
    that every cluster member is near-duplicate of some extreme; each extreme
    tracks how many members it covers in its ``copies`` count.

    NOTE(review): this body is heavily mangled by the renaming — the inner
    loop variable shadows the outer one (upstream uses two distinct element
    names), ``jaccard_similarity`` is called with the same argument twice,
    ``jaccard_threshold`` / ``_shared_dataset`` are undefined here, and the
    ``else`` branch appends a function argument rather than the current
    element. Do not trust this control flow without checking upstream.
    """
    __magic_name__ = []
    for elementa in cluster:
        # Code of the candidate cluster member.
        __magic_name__ = _shared_dataset[elementa["base_index"]]["content"]
        for elementa in extremes:
            # Code of an already-chosen extreme.
            __magic_name__ = _shared_dataset[elementa["base_index"]]["content"]
            if jaccard_similarity(lowerCamelCase_ , lowerCamelCase_ ) >= jaccard_threshold:
                # Candidate is covered by this extreme: bump its copy count.
                elementa["copies"] += 1
                break
        else:
            # Not covered by any extreme: promote the candidate to an extreme.
            __magic_name__ = 1
            extremes.append(lowerCamelCase_ )
    return extremes
def __snake_case ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ):
    """Compute the extremes of every duplicate cluster in parallel.

    Publishes the dataset through the module-level shared slot so worker
    processes can read it without pickling it per task.

    NOTE(review): ``_find_cluster_extremes_shared`` and the intended binding of
    the three collapsed parameters (upstream: cluster list, dataset,
    jaccard_threshold) are obscured by the renaming; confirm upstream.
    """
    global _shared_dataset
    __magic_name__ = dataset
    __magic_name__ = []
    __magic_name__ = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCamelCase_ )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                lowerCamelCase_ , lowerCamelCase_ , ) , total=len(lowerCamelCase_ ) , ):
            extremes_list.append(lowerCamelCase_ )
    return extremes_list
def __snake_case ( lowerCamelCase_ : Type[Dataset] , lowerCamelCase_ : float = 0.85 ):
    """Deduplicate a dataset: cluster near-duplicates, keep each cluster's
    extremes, filter out all other duplicates, and annotate the clusters.

    Returns the filtered dataset and the (annotated) duplicate clusters.

    NOTE(review): mangled by the renaming — ``make_duplicate_clusters`` /
    ``find_extremes`` are not defined under those names here, the ``filter``
    lambda declares the same parameter twice (a SyntaxError) and references an
    undefined ``idx``/``remove_indices``, and the ``print``s pass names that
    don't exist (upstream prints dataset/cluster sizes). Confirm upstream
    before relying on any of this.
    """
    __magic_name__ = make_duplicate_clusters(lowerCamelCase_ , lowerCamelCase_ )
    # All indices that belong to some duplicate cluster.
    __magic_name__ = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    __magic_name__ = {}
    __magic_name__ = find_extremes(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    for extremes in extremes_clusters:
        for element in extremes:
            __magic_name__ = element
    # Duplicates that are NOT extremes get removed from the dataset.
    __magic_name__ = duplicate_indices - set(extreme_dict.keys() )
    __magic_name__ = dataset.filter(lambda lowerCamelCase_ , lowerCamelCase_ : idx not in remove_indices , with_indices=lowerCamelCase_ )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            __magic_name__ = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                __magic_name__ = extreme_dict[element["base_index"]]["copies"]
    print(F'Original dataset size: {len(lowerCamelCase_ )}' )
    print(F'Number of duplicate clusters: {len(lowerCamelCase_ )}' )
    print(F'Files in duplicate cluster: {len(lowerCamelCase_ )}' )
    print(F'Unique files in duplicate cluster: {len(lowerCamelCase_ )}' )
    print(F'Filtered dataset size: {len(lowerCamelCase_ )}' )
    return ds_filter, duplicate_clusters
| 664 | 0 |
'''simple docstring'''
# Mapping of dependency name -> pip requirement specifier. This table is the
# single source of truth for the package's install/extras requirements
# (typically auto-consumed by setup.py; do not edit versions casually).
UpperCamelCase_ = {
    'Pillow': 'Pillow',
    'accelerate': 'accelerate>=0.11.0',
    'compel': 'compel==0.1.8',
    'black': 'black~=23.1',
    'datasets': 'datasets',
    'filelock': 'filelock',
    'flax': 'flax>=0.4.1',
    'hf-doc-builder': 'hf-doc-builder>=0.3.0',
    'huggingface-hub': 'huggingface-hub>=0.13.2',
    'requests-mock': 'requests-mock==1.10.0',
    'importlib_metadata': 'importlib_metadata',
    'invisible-watermark': 'invisible-watermark',
    'isort': 'isort>=5.5.4',
    'jax': 'jax>=0.2.8,!=0.3.2',
    'jaxlib': 'jaxlib>=0.1.65',
    'Jinja2': 'Jinja2',
    'k-diffusion': 'k-diffusion>=0.0.12',
    'torchsde': 'torchsde',
    'note_seq': 'note_seq',
    'librosa': 'librosa',
    'numpy': 'numpy',
    'omegaconf': 'omegaconf',
    'parameterized': 'parameterized',
    'protobuf': 'protobuf>=3.20.3,<4',
    'pytest': 'pytest',
    'pytest-timeout': 'pytest-timeout',
    'pytest-xdist': 'pytest-xdist',
    'ruff': 'ruff>=0.0.241',
    'safetensors': 'safetensors',
    'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
    'scipy': 'scipy',
    'onnx': 'onnx',
    'regex': 'regex!=2019.12.17',
    'requests': 'requests',
    'tensorboard': 'tensorboard',
    'torch': 'torch>=1.4',
    'torchvision': 'torchvision',
    'transformers': 'transformers>=4.25.1',
    'urllib3': 'urllib3<=2.0.0',
}
| 384 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# The conversion below depends on exact gluonnlp/mxnet behavior; fail fast on
# any other version rather than silently producing wrong weights.
if version.parse(nlp.__version__) != version.parse('0.8.3'):
    raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
    raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
__magic_name__ : Optional[int] =logging.get_logger(__name__)
# Sample sentence used to compare original vs converted model outputs.
__magic_name__ : Tuple ='The Nymphenburg Palace is a beautiful palace in Munich!'
def __snake_case ( lowerCamelCase_ : str , lowerCamelCase_ : str ):
    """Convert an official Bort (gluonnlp/mxnet) checkpoint to a HuggingFace
    BERT-style PyTorch model, save it, and sanity-check the outputs against
    the original model on a sample sentence.

    NOTE(review): mangled by the renaming — both parameters share one name
    (a SyntaxError; upstream: bort_checkpoint_path, pytorch_dump_folder_path),
    the nested helpers read names (``mx_array``, ``hf_param``, ``params``,
    ``gluon_param``) that differ from their declared parameters, and many
    right-hand names below are undefined in this file. The structure is kept
    byte-for-byte; confirm every binding against the upstream script.
    """
    # Hyperparameters of the released bort_4_8_768_1024 architecture.
    __magic_name__ = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }
    __magic_name__ = bort_4_8_768_1024_hparams
    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    __magic_name__ = BERTEncoder(
        attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=lowerCamelCase_ , output_all_encodings=lowerCamelCase_ , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , lowerCamelCase_ ) , )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    __magic_name__ = "openwebtext_ccnews_stories_books_cased"
    # Specify download folder to Gluonnlp's vocab
    __magic_name__ = os.path.join(get_home_dir() , "models" )
    __magic_name__ = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=lowerCamelCase_ )
    __magic_name__ = nlp.model.BERTModel(
        lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , )
    original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ )
    __magic_name__ = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    __magic_name__ = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(lowerCamelCase_ ),
    }
    __magic_name__ = BertConfig.from_dict(lowerCamelCase_ )
    __magic_name__ = BertForMaskedLM(lowerCamelCase_ )
    hf_bort_model.eval()
    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(lowerCamelCase_ : Any ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
    # Check param shapes and map new HF param back
    def check_and_map_params(lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int ):
        __magic_name__ = hf_param.shape
        __magic_name__ = to_torch(params[gluon_param] )
        __magic_name__ = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
        return gluon_param
    # Embedding layers.
    __magic_name__ = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
    __magic_name__ = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
    __magic_name__ = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
    __magic_name__ = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    __magic_name__ = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        __magic_name__ = hf_bort_model.bert.encoder.layer[i]
        # self attention
        __magic_name__ = layer.attention.self
        __magic_name__ = check_and_map_params(
            self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
        __magic_name__ = check_and_map_params(
            self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
        __magic_name__ = check_and_map_params(
            self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
        __magic_name__ = check_and_map_params(
            self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
        __magic_name__ = check_and_map_params(
            self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
        __magic_name__ = check_and_map_params(
            self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
        # self attention output
        __magic_name__ = layer.attention.output
        __magic_name__ = check_and_map_params(
            self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' )
        __magic_name__ = check_and_map_params(
            self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' )
        __magic_name__ = check_and_map_params(
            self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' )
        __magic_name__ = check_and_map_params(
            self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' )
        # intermediate
        __magic_name__ = layer.intermediate
        __magic_name__ = check_and_map_params(
            intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
        __magic_name__ = check_and_map_params(
            intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
        # output
        __magic_name__ = layer.output
        __magic_name__ = check_and_map_params(
            bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
        __magic_name__ = check_and_map_params(
            bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
        __magic_name__ = check_and_map_params(
            bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
        __magic_name__ = check_and_map_params(
            bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
    # Save space and energy 🎄
    hf_bort_model.half()
    # Compare output of both models
    __magic_name__ = RobertaTokenizer.from_pretrained("roberta-base" )
    __magic_name__ = tokenizer.encode_plus(lowerCamelCase_ )["input_ids"]
    # Get gluon output
    __magic_name__ = mx.nd.array([input_ids] )
    __magic_name__ = original_bort(inputs=lowerCamelCase_ , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(lowerCamelCase_ )
    __magic_name__ = BertModel.from_pretrained(lowerCamelCase_ )
    hf_bort_model.eval()
    __magic_name__ = tokenizer.encode_plus(lowerCamelCase_ , return_tensors="pt" )
    __magic_name__ = hf_bort_model(**lowerCamelCase_ )[0]
    __magic_name__ = output_gluon[0].asnumpy()
    __magic_name__ = output_hf[0].detach().numpy()
    # Compare the two outputs element-wise within a loose fp16 tolerance.
    __magic_name__ = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    __magic_name__ = np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
    if success:
        print("✔️ Both model do output the same tensors" )
    else:
        print("❌ Both model do **NOT** output the same tensors" )
        print("Absolute difference is:" , lowerCamelCase_ )
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint/output paths and run the conversion.
    __magic_name__ : int =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    # NOTE(review): `parser`, `args` and `convert_bort_checkpoint_to_pytorch`
    # are undefined under those names in this file (renaming artifact).
    __magic_name__ : Optional[Any] =parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 664 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowercase ():
    """Parse CLI arguments for the TPU distributed-training launcher.

    Returns the parsed ``argparse.Namespace`` with ``num_cores`` (int),
    ``training_script`` (str) and ``training_script_args`` (remainder list).

    Fixes: ``type=`` / ``nargs=`` referenced an undefined name
    (``lowerCamelCase_``) — restored the intended ``int`` / ``str`` /
    ``REMAINDER``; dropped the incorrect ``Optional[int]`` return annotation
    (the function returns a Namespace).

    NOTE(review): a second module-level function also named ``lowercase`` is
    defined below and shadows this one — renaming artifact.
    """
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch """
            """helper utility that will spawn up """
            """multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )
    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()
def lowercase () -> Any:
    """Launcher entry point: import the training script as a module and spawn
    it on the requested number of TPU cores via torch_xla multiprocessing.

    NOTE(review): this redefines (and shadows) the argument-parsing
    ``lowercase`` above — renaming artifact; upstream these are
    ``parse_args``/``main``.
    """
    lowerCAmelCase = parse_args()
    # Import training_script as a module.
    lowerCAmelCase = Path(args.training_script )
    # Make the script's directory importable so its module name resolves.
    sys.path.append(str(script_fpath.parent.resolve() ) )
    lowerCAmelCase = script_fpath.stem
    lowerCAmelCase = importlib.import_module(lowerCamelCase_ )
    # Patch sys.argv
    # xla_multiprocessing re-executes `_mp_fn` per core with this argv.
    lowerCAmelCase = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
# NOTE(review): `main` is not defined in this file — the entry function above
# was renamed to `lowercase` (renaming artifact); confirm upstream.
if __name__ == "__main__":
    main()
| 169 |
'''simple docstring'''
def __snake_case ( a : int , b : int ):
    """Return the bitwise AND of two non-negative integers as a binary string
    prefixed with "0b".

    Raises ValueError when either input is negative.

    Fixes: both parameters were declared with the same name (a SyntaxError in
    Python) while the body already read ``a``/``b`` — restored the distinct
    parameter names; the ``zfill`` width also referenced the parameter instead
    of the computed maximum length.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 664 | 0 |
# Package version string (NOTE(review): conventionally named __version__
# upstream — renaming artifact).
UpperCamelCase_ : str = '0.21.0'
# Re-export the public API from the package submodules.
from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)
# `rich` is an optional dependency; only expose the helper when installed.
if is_rich_available():
    from .utils import rich
| 461 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
# Module-level logging state.
# NOTE(review): all of these were renamed to `__magic_name__`, so each
# assignment clobbers the previous one; upstream they are presumably
# _lock / _default_handler / log_levels / _default_log_level /
# _tqdm_active, which the functions below still reference — confirm.
__magic_name__ : Tuple =threading.Lock()  # guards handler (de)installation
__magic_name__ : Optional[logging.Handler] =None  # lazily-installed default handler
__magic_name__ : List[str] ={
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}
__magic_name__ : str =logging.WARNING  # fallback verbosity
__magic_name__ : Any =True  # whether tqdm progress bars are active
def __snake_case ( ):
    """Resolve the default log level from the TRANSFORMERS_VERBOSITY env var,
    falling back to the module default; warn on unknown values.

    NOTE(review): `log_levels` / `_default_log_level` are not defined under
    those names in this file (renaming artifact); confirm upstream.
    """
    __magic_name__ = os.getenv("TRANSFORMERS_VERBOSITY" , lowerCamelCase_ )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
                F'has to be one of: { ", ".join(log_levels.keys() ) }' )
    return _default_log_level
def __snake_case ( ):
    """Return the library's root package name (the first dotted component of
    this module's qualified name)."""
    return __name__.partition("." )[0]
def __snake_case ( ):
    """Return the library's root logger.

    NOTE(review): `_get_library_name` is not defined under that name in this
    file (renaming artifact); confirm upstream.
    """
    return logging.getLogger(_get_library_name() )
def __snake_case ( ):
    """Idempotently install the library's default stderr handler and apply the
    default verbosity to the library root logger (thread-safe).

    NOTE(review): right-hand/global names (`_default_handler`, propagate flag)
    are obscured by the renaming to `__magic_name__`; confirm upstream.
    """
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        __magic_name__ = logging.StreamHandler() # Set sys.stderr as stream.
        __magic_name__ = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        __magic_name__ = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        # Don't propagate to the (user-configured) root logger.
        __magic_name__ = False
def __snake_case( ):
    """Remove the library's default handler and reset the root logger level.

    NOTE(review): the original bound the root logger and the cleared handler
    to `__magic_name__`, so the handler was never actually detached/cleared.
    """
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def __snake_case( ):
    """Return the mapping of verbosity names to ``logging`` levels.

    NOTE(review): `log_levels` is never bound under that name in this file
    (the module constants were all assigned to `__magic_name__`), so this
    raises NameError at runtime as written.
    """
    return log_levels
def __snake_case( lowerCamelCase_ : Optional[str] = None ):
    """Return a logger with the given name (defaults to the library name).

    NOTE(review): the original read the unbound local ``name``; the parameter
    is kept under its existing public name for compatibility.
    """
    name = lowerCamelCase_
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name )
def __snake_case( ):
    """Return the effective verbosity level of the library root logger.

    NOTE(review): relies on siblings `_configure_library_root_logger` and
    `_get_library_root_logger`, which are not defined under those names in
    this file (every helper was renamed `__snake_case`).
    """
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def __snake_case( lowerCamelCase_ : int ):
    """Set the library root logger to the given level (e.g. ``logging.INFO``).

    NOTE(review): the sibling helpers referenced here are not defined under
    these names in this file — see the renaming noted above.
    """
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(lowerCamelCase_ )
def __snake_case( ):
    """Set library verbosity to INFO.

    NOTE(review): the original passed the undefined name ``lowerCamelCase_``;
    upstream's first wrapper passes ``logging.INFO`` — confirm the intended
    level against the project this came from.
    """
    return set_verbosity(logging.INFO )
def __snake_case( ):
    """Set library verbosity to WARNING.

    NOTE(review): the original passed the undefined name ``lowerCamelCase_``;
    upstream's second wrapper passes ``logging.WARNING`` — confirm.
    """
    return set_verbosity(logging.WARNING )
def __snake_case( ):
    """Set library verbosity to DEBUG.

    NOTE(review): the original passed the undefined name ``lowerCamelCase_``;
    upstream's third wrapper passes ``logging.DEBUG`` — confirm.
    """
    return set_verbosity(logging.DEBUG )
def __snake_case( ):
    """Set library verbosity to ERROR.

    NOTE(review): the original passed the undefined name ``lowerCamelCase_``;
    upstream's fourth wrapper passes ``logging.ERROR`` — confirm.
    """
    return set_verbosity(logging.ERROR )
def __snake_case( ):
    """Detach the library's default handler from the root logger.

    Expects the handler to have been installed already (asserted below).
    NOTE(review): sibling helper names referenced here are undefined in this
    file — see renaming noted above.
    """
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def __snake_case( ):
    """Re-attach the library's default handler to the root logger.

    NOTE(review): sibling helper names referenced here are undefined in this
    file — see renaming noted above.
    """
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def __snake_case( lowerCamelCase_ : logging.Handler ):
    """Add the given handler to the library's root logger.

    NOTE(review): the original asserted on the undefined name ``handler``
    instead of the parameter.
    """
    _configure_library_root_logger()
    assert lowerCamelCase_ is not None
    _get_library_root_logger().addHandler(lowerCamelCase_ )
def __snake_case( lowerCamelCase_ : logging.Handler ):
    """Remove the given handler from the library's root logger.

    NOTE(review): the original asserted ``handler not in ... .handlers``,
    which both used an undefined name and inverted the invariant — a handler
    being removed must currently be installed.
    """
    _configure_library_root_logger()
    assert lowerCamelCase_ is not None and lowerCamelCase_ in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(lowerCamelCase_ )
def __snake_case( ):
    """Disable propagation of library log records to ancestor loggers.

    NOTE(review): the original assigned ``False`` to a throwaway module name
    instead of the root logger's ``propagate`` flag.
    """
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def __snake_case( ):
    """Enable propagation of library log records to ancestor loggers.

    Disable the library's default handler first to avoid double logging.
    NOTE(review): the original assigned ``True`` to a throwaway module name
    instead of the root logger's ``propagate`` flag.
    """
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def __snake_case( ):
    """Apply an explicit "[LEVEL|file:line] time >> message" formatter to
    every handler of the library root logger.

    NOTE(review): the original built the formatter into a throwaway name and
    then passed the undefined ``lowerCamelCase_`` to ``setFormatter``.
    """
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
        handler.setFormatter(formatter )
def __snake_case( ):
    """Reset every handler of the library root logger to default formatting.

    NOTE(review): the original passed the undefined ``lowerCamelCase_``;
    a ``setFormatter(None)`` restores logging's default format.
    """
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def __snake_case( self : Union[str, Any] , *args , **kwargs ):
    """Emit ``self.warning(...)`` unless TRANSFORMERS_NO_ADVISORY_WARNINGS is set.

    NOTE(review): the original declared ``*lowerCamelCase_`` and
    ``**lowerCamelCase_`` — a duplicate parameter name, which is a
    SyntaxError — and read the unbound local ``no_advisory_warnings``.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )
# NOTE(review): `warning_advice` is undefined here (the function above is
# named `__snake_case`); upstream attaches it as `logging.Logger.warning_advice`.
__magic_name__ : int =warning_advice
# Cache by (self, args) so each distinct warning is emitted only once.
# NOTE(review): the original decorated with the undefined `lowerCamelCase_`
# and declared duplicate `*lowerCamelCase_`/`**lowerCamelCase_` parameters
# (a SyntaxError); upstream uses an unbounded cache here on purpose.
@functools.lru_cache(None )
def __snake_case( self : Dict , *args , **kwargs ):
    """Emit ``self.warning(...)`` at most once per unique call signature."""
    self.warning(*args , **kwargs )
# NOTE(review): `warning_once` is undefined here (the function above is named
# `__snake_case`); upstream attaches it as `logging.Logger.warning_once`.
__magic_name__ : Optional[int] =warning_once
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : int , *_lowerCamelCase : Tuple , **_lowerCamelCase : Optional[Any] ) -> Any: # pylint: disable=unused-argument
__magic_name__ = args[0] if args else None
def __iter__( self : int ) -> Tuple:
return iter(self._iterator )
def __getattr__( self : List[Any] , _lowerCamelCase : int ) -> List[Any]:
def empty_fn(*_lowerCamelCase : List[str] , **_lowerCamelCase : List[str] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Optional[Any] ) -> Any:
return self
def __exit__( self : int , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] ) -> Dict:
return
class UpperCamelCase_ :
    """Factory that yields real tqdm bars when `_tqdm_active`, else the no-op stand-in.

    NOTE(review): `EmptyTqdm` is not defined under that name in this file (the
    no-op class above was also renamed `UpperCamelCase_`, which this definition
    itself shadows), and both helper methods below share the name `__A`, so the
    set-lock variant is overwritten by the get-lock variant (upstream names:
    `set_lock` / `get_lock`).
    """

    def __call__( self : Any , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Any ) -> List[Any]:
        # Real progress bar when enabled, silent stand-in otherwise.
        if _tqdm_active:
            return tqdm_lib.tqdm(*_lowerCamelCase , **_lowerCamelCase )
        else:
            return EmptyTqdm(*_lowerCamelCase , **_lowerCamelCase )

    def __A ( self : Optional[Any] , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Dict ) -> Union[str, Any]:
        # NOTE(review): this assignment binds a throwaway name; upstream resets
        # `self._lock = None` before delegating to tqdm's set_lock.
        __magic_name__ = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*_lowerCamelCase , **_lowerCamelCase )

    def __A ( self : str ) -> Any:
        # Delegate to tqdm's global lock when progress bars are active.
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
# NOTE(review): `_tqdm_cls` is undefined here (the class above was renamed
# `UpperCamelCase_`); upstream binds this singleton as the module-level `tqdm`.
__magic_name__ : List[Any] =_tqdm_cls()
def __snake_case( ):
    """Return True when tqdm progress bars are globally enabled.

    NOTE(review): `_tqdm_active` is never bound under that name in this file —
    the module constants were all assigned to `__magic_name__`.
    """
    global _tqdm_active
    return bool(_tqdm_active )
def __snake_case( ):
    """Enable tqdm progress bars here and in huggingface_hub.

    NOTE(review): the original declared ``global _tqdm_active`` but then
    assigned ``True`` to a throwaway name, so the flag never changed.
    """
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def __snake_case( ):
    """Disable tqdm progress bars here and in huggingface_hub.

    NOTE(review): the original declared ``global _tqdm_active`` but then
    assigned ``False`` to a throwaway name, so the flag never changed.
    """
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 664 | 0 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class lowerCamelCase ( unittest.TestCase , ToolTesterMixin ):
    """Tests for the text-to-speech tool.

    NOTE(review): the original subclassed the undefined name `__lowerCamelCase`
    (this file imports ToolTesterMixin), assigned the tool and its results to
    throwaway locals, and named all three methods `snake_case__`, so only the
    last definition survived.  Methods renamed so each is registered — confirm
    the intended names against upstream.
    """

    def setUp( self :Optional[int] ) -> Any:
        """Load and initialise the tool under test."""
        self.tool = load_tool('''text-to-speech''' )
        self.tool.setup()

    def test_exact_match_arg( self :Union[str, Any] ) -> int:
        """First waveform samples match the seeded reference values."""
        torch.manual_seed(0 )
        result = self.tool('''hey''' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )

    def test_exact_match_kwarg( self :List[str] ) -> int:
        """Second invocation with the same seed reproduces the reference values."""
        torch.manual_seed(0 )
        result = self.tool('''hey''' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
"""Lazy import structure for the FocalNet model (standard transformers __init__ pattern).

NOTE(review): the original bound every piece of the mapping to the throwaway
name `__magic_name__`, so `_import_structure` (consumed by _LazyModule below)
was never defined and the lazy module was never installed into sys.modules;
restored to the canonical pattern.
"""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is present.
    _import_structure['modeling_focalnet'] = [
        'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FocalNetForImageClassification',
        'FocalNetForMaskedImageModeling',
        'FocalNetBackbone',
        'FocalNetModel',
        'FocalNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 664 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
UpperCamelCase : List[Any] = logging.get_logger(__name__)

# NOTE(review): both module constants are bound to the same name
# `UpperCamelCase`, so the logger above is immediately clobbered by the
# checkpoint map below (upstream names: `logger` and
# `IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP`).
UpperCamelCase : int = {
    'openai/imagegpt-small': '',
    'openai/imagegpt-medium': '',
    'openai/imagegpt-large': '',
}
class UpperCamelCase__ (PretrainedConfig ):
    """Configuration for ImageGPT models.

    NOTE(review): the original subclassed the undefined name `a`
    (PretrainedConfig is what this file imports), declared all class
    attributes as `_UpperCamelCase`, and gave every __init__ parameter the
    same name `_lowerCAmelCase` — a SyntaxError.  Restored to the canonical
    upstream names; defaults preserved from the original signature.
    """

    model_type = '''imagegpt'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self ,vocab_size=5_12 + 1 ,n_positions=32 * 32 ,n_embd=5_12 ,n_layer=24 ,n_head=8 ,n_inner=None ,activation_function="quick_gelu" ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1E-5 ,initializer_range=0.02 ,scale_attn_weights=True ,use_cache=True ,scale_attn_by_inverse_layer_idx=False ,reorder_and_upcast_attn=False ,tie_word_embeddings=False ,**kwargs ,):
        # 512 pixel clusters + 1 start-of-sequence token; 32x32 positions.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings ,**kwargs )
class UpperCamelCase__ (OnnxConfig ):
    """ONNX export configuration for ImageGPT.

    NOTE(review): the original subclassed the undefined name `a` (OnnxConfig
    is what this file imports) and named both members `UpperCamelCase_` with
    duplicate `_lowerCAmelCase` parameters (a SyntaxError), so the property
    was clobbered.  Restored to the member names the OnnxConfig machinery
    expects (`inputs`, `generate_dummy_inputs`).
    """

    @property
    def inputs( self ):
        """ONNX input spec: `input_ids` with dynamic batch/sequence axes."""
        return OrderedDict(
            [
                ("""input_ids""", {0: """batch""", 1: """sequence"""}),
            ] )

    def generate_dummy_inputs( self ,preprocessor ,batch_size = 1 ,seq_length = -1 ,is_pair = False ,framework = None ,num_channels = 3 ,image_width = 32 ,image_height = 32 ,):
        """Build dummy pixel inputs by running generated images through the preprocessor."""
        input_image = self._generate_dummy_images(batch_size ,num_channels ,image_height ,image_width )
        inputs = dict(preprocessor(images=input_image ,return_tensors=framework ) )
        return inputs
| 50 |
"""Lazy import structure for Longformer (standard transformers __init__ pattern).

NOTE(review): the original bound every sub-list to the throwaway name
`__magic_name__`, so `_import_structure` was never populated and the
_LazyModule instance was never installed into sys.modules; restored.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {
    'configuration_longformer': [
        'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'LongformerConfig',
        'LongformerOnnxConfig',
    ],
    'tokenization_longformer': ['LongformerTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_longformer_fast'] = ['LongformerTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_longformer'] = [
        'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LongformerForMaskedLM',
        'LongformerForMultipleChoice',
        'LongformerForQuestionAnswering',
        'LongformerForSequenceClassification',
        'LongformerForTokenClassification',
        'LongformerModel',
        'LongformerPreTrainedModel',
        'LongformerSelfAttention',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_longformer'] = [
        'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFLongformerForMaskedLM',
        'TFLongformerForMultipleChoice',
        'TFLongformerForQuestionAnswering',
        'TFLongformerForSequenceClassification',
        'TFLongformerForTokenClassification',
        'TFLongformerModel',
        'TFLongformerPreTrainedModel',
        'TFLongformerSelfAttention',
    ]

if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 664 | 0 |
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def UpperCAmelCase_ ( A , A , A ):
    """Convert a T5X checkpoint into a Flax T5/LongT5 model and save it.

    NOTE(review): heavily mangled by identifier renaming and left byte-for-byte:
    * all three parameters share the name ``A`` — a SyntaxError (upstream:
      ``t5x_checkpoint_path``, ``config_name``, ``flax_dump_folder_path``);
    * every local is assigned to the placeholder ``_a`` while later lines read
      the upstream names (``config``, ``tax_model``, ``split_mlp_wi``,
      ``flax_model_encoder_layer_block`` ...), so nothing is actually bound;
    * ``lowerCamelCase_`` is undefined throughout;
    * one line reads ``txa_mlp_layer_norm`` while the matching assignment
      placeholder pattern spells it ``tax_mlp_layer_norm``.
    Restore from the upstream conversion script rather than patching piecemeal.
    """
    _a : List[Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
    _a : List[str] = FlaxAutoModelForSeqaSeqLM.from_config(config=lowerCamelCase_ )
    _a : Any = checkpoints.load_tax_checkpoint(lowerCamelCase_ )
    # v1.1-style checkpoints split the MLP input projection into wi_0/wi_1.
    _a : List[Any] = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
    if config.model_type == "t5":
        _a : Tuple = 'SelfAttention'
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        _a : int = 'LocalSelfAttention'
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        _a : int = 'TransientGlobalSelfAttention'
    else:
        raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'
            ' attribute with a value from [\'local\', \'transient-global].' )
    # Encoder
    for layer_index in range(config.num_layers ):
        _a : Union[str, Any] = f'''layers_{str(lowerCamelCase_ )}'''
        # Self-Attention
        _a : Any = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
        _a : Any = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
        _a : Dict = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
        _a : Dict = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            _a : List[Any] = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
        # Layer Normalization
        _a : Tuple = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
        if split_mlp_wi:
            _a : Any = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
            _a : Optional[int] = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            _a : Any = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
        _a : Any = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
        # Layer Normalization
        _a : Optional[Any] = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
        # Assigning
        _a : Optional[int] = flax_model.params['encoder']['block'][str(lowerCamelCase_ )]['layer']
        _a : Tuple = tax_attention_key
        _a : Dict = tax_attention_out
        _a : Union[str, Any] = tax_attention_query
        _a : Any = tax_attention_value
        _a : Tuple = tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            _a : str = tax_global_layer_norm
        if split_mlp_wi:
            _a : Union[str, Any] = tax_mlp_wi_a
            _a : Dict = tax_mlp_wi_a
        else:
            _a : Any = tax_mlp_wi
        _a : List[Any] = tax_mlp_wo
        _a : Optional[int] = tax_mlp_layer_norm
        _a : List[Any] = flax_model_encoder_layer_block
    # Only for layer 0:
    _a : List[Any] = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
    _a : List[str] = tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        _a : List[Any] = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
        _a : List[str] = tax_encoder_global_rel_embedding
    # Assigning
    _a : Dict = tax_model['target']['encoder']['encoder_norm']['scale']
    _a : List[str] = tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers ):
        _a : Any = f'''layers_{str(lowerCamelCase_ )}'''
        # Self-Attention
        _a : int = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
        _a : Optional[int] = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
        _a : Union[str, Any] = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
        _a : Dict = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
        # Layer Normalization
        _a : str = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
            'scale'
        ]
        # Encoder-Decoder-Attention
        _a : int = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
        _a : List[Any] = tax_enc_dec_attention_module['key']['kernel']
        _a : Any = tax_enc_dec_attention_module['out']['kernel']
        _a : List[str] = tax_enc_dec_attention_module['query']['kernel']
        _a : Optional[int] = tax_enc_dec_attention_module['value']['kernel']
        # Layer Normalization
        _a : str = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
        # MLP
        if split_mlp_wi:
            _a : str = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
            _a : Optional[Any] = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            _a : List[str] = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
        _a : Optional[int] = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
        # Layer Normalization
        _a : Optional[int] = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
        # Assigning
        _a : str = flax_model.params['decoder']['block'][str(lowerCamelCase_ )]['layer']
        _a : List[str] = tax_attention_key
        _a : Union[str, Any] = tax_attention_out
        _a : Optional[Any] = tax_attention_query
        _a : str = tax_attention_value
        _a : Any = tax_pre_attention_layer_norm
        _a : Any = tax_enc_dec_attention_key
        _a : Union[str, Any] = tax_enc_dec_attention_out
        _a : Dict = tax_enc_dec_attention_query
        _a : str = tax_enc_dec_attention_value
        _a : Optional[Any] = tax_cross_layer_norm
        if split_mlp_wi:
            _a : Dict = tax_mlp_wi_a
            _a : Union[str, Any] = tax_mlp_wi_a
        else:
            _a : Union[str, Any] = tax_mlp_wi
        _a : Optional[Any] = tax_mlp_wo
        _a : Tuple = txa_mlp_layer_norm
        _a : Union[str, Any] = flax_model_decoder_layer_block
    # Decoder Normalization
    _a : Union[str, Any] = tax_model['target']['decoder']['decoder_norm']['scale']
    _a : Any = txa_decoder_norm
    # Only for layer 0:
    _a : Dict = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
    _a : List[Any] = tax_decoder_rel_embedding
    # Token Embeddings
    _a : List[Any] = tax_model['target']['token_embedder']['embedding']
    _a : List[str] = txa_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        _a : int = tax_model['target']['decoder']['logits_dense']['kernel']
    flax_model.save_pretrained(lowerCamelCase_ )
    print('T5X Model was sucessfully converted!' )
if __name__ == "__main__":
    # NOTE(review): the original bound both the parser and the parsed args to
    # `UpperCAmelCase_`, clobbering the conversion function of that name before
    # calling the undefined `convert_tax_checkpoint_to_flax`, and read
    # `args.tax_checkpoint_path` although argparse's dest for
    # `--t5x_checkpoint_path` is `t5x_checkpoint_path`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    UpperCAmelCase_(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 120 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Map human-readable resampling names to PIL constants; the Resampling enum
# replaced the module-level constants in Pillow 9.1.0.
# NOTE(review): both branches bind the throwaway name `__magic_name__`
# (upstream name: PIL_INTERPOLATION), so nothing in this module can read
# whichever table is built.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    __magic_name__ : str ={
        'linear': PIL.Image.Resampling.BILINEAR,
        'bilinear': PIL.Image.Resampling.BILINEAR,
        'bicubic': PIL.Image.Resampling.BICUBIC,
        'lanczos': PIL.Image.Resampling.LANCZOS,
        'nearest': PIL.Image.Resampling.NEAREST,
    }
else:
    __magic_name__ : Tuple ={
        'linear': PIL.Image.LINEAR,
        'bilinear': PIL.Image.BILINEAR,
        'bicubic': PIL.Image.BICUBIC,
        'lanczos': PIL.Image.LANCZOS,
        'nearest': PIL.Image.NEAREST,
    }
def __snake_case( lowerCamelCase_ : Optional[Any] ):
    """Convert a batch of tensors in [-1, 1] to a list of PIL images.

    NOTE(review): the original bound every intermediate to `__magic_name__`
    and read the undefined name `images`, so the clamp/permute results were
    discarded; restored the intended pipeline.  `numpy_to_pil` is not defined
    under that name in this file (the helper below was renamed `__snake_case`).
    """
    images = (lowerCamelCase_ / 2 + 0.5).clamp(0 , 1 )
    # (B, C, H, W) -> (B, H, W, C) numpy array for PIL consumption.
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def __snake_case( lowerCamelCase_ : Optional[Any] ):
    """Convert a numpy image batch (floats in [0, 1]) to a list of PIL images.

    NOTE(review): the original bound every intermediate to `__magic_name__`
    (so the scaling/expansion was discarded) and passed the undefined
    `lowerCamelCase_` instead of each image to `Image.fromarray`.
    """
    images = lowerCamelCase_
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
| 664 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ='▁'
__UpperCAmelCase ={
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
__UpperCAmelCase ={
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
__UpperCAmelCase ={
'facebook/s2t-small-librispeech-asr': 1024,
}
__UpperCAmelCase =['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
__UpperCAmelCase ={'mustc': MUSTC_LANGS}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
    """Speech2Text-style SentencePiece tokenizer (vocab.json + sentencepiece.bpe.model).

    NOTE(review): mangled by identifier renaming and left byte-for-byte:
    * the base `UpperCAmelCase_` is undefined here (upstream: PreTrainedTokenizer);
    * the five class attributes all share the name `lowercase__`, so only the
      last (`[]`, upstream `prefix_tokens`) survives;
    * __init__'s parameters all share the name `UpperCamelCase__` — a
      SyntaxError (upstream: vocab_file, spm_file, bos/eos/pad/unk tokens,
      do_upper_case, do_lower_case, tgt_lang, lang_codes, sp_model_kwargs);
    * `_lowerCamelCase` is undefined, and every `A__ = ...` discards a value
      later code expects on `self` (`self.encoder`, `self.sp_model`,
      `self.prefix_tokens`, ...).
    """

    lowercase__ : Optional[Any] = VOCAB_FILES_NAMES
    lowercase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    lowercase__ : List[str] = MAX_MODEL_INPUT_SIZES
    lowercase__ : Optional[int] = ['''input_ids''', '''attention_mask''']
    lowercase__ : List[int] = []

    def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<unk>" , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__ = None , **UpperCamelCase__ , ):
        """Load the JSON vocab and SentencePiece model; set up language codes when given."""
        A__ = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
        A__ = do_upper_case
        A__ = do_lower_case
        A__ = load_json(_lowerCamelCase )
        A__ = {v: k for k, v in self.encoder.items()}
        A__ = spm_file
        A__ = load_spm(_lowerCamelCase , self.sp_model_kwargs )
        if lang_codes is not None:
            A__ = lang_codes
            A__ = LANGUAGES[lang_codes]
            A__ = [f"""<lang:{lang}>""" for lang in self.langs]
            A__ = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs}
            A__ = self.lang_tokens
            A__ = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            A__ = {}

    @property
    def lowercase_ ( self ):
        """Vocabulary size: number of entries in the encoder map."""
        return len(self.encoder )

    @property
    def lowercase_ ( self ):
        """Current target language code."""
        return self._tgt_lang

    @tgt_lang.setter
    def lowercase_ ( self , UpperCamelCase__ ):
        """Switch the target language and refresh its special-token prefix."""
        A__ = new_tgt_lang
        self.set_tgt_lang_special_tokens(_lowerCamelCase )

    def lowercase_ ( self , UpperCamelCase__ ):
        """Set the prefix tokens to the language-code id of `tgt_lang`."""
        A__ = self.lang_code_to_id[tgt_lang]
        A__ = [lang_code_id]

    def lowercase_ ( self , UpperCamelCase__ ):
        """Tokenize text with the SentencePiece model."""
        return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )

    def lowercase_ ( self , UpperCamelCase__ ):
        """Map a token string to its id, falling back to the unk token."""
        return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] )

    def lowercase_ ( self , UpperCamelCase__ ):
        """Map an id back to its token string, falling back to the unk token."""
        return self.decoder.get(_lowerCamelCase , self.unk_token )

    def lowercase_ ( self , UpperCamelCase__ ):
        """Join tokens into a string, decoding around special tokens and applying casing."""
        A__ = []
        A__ = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                A__ = self.sp_model.decode(_lowerCamelCase )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                A__ = []
            else:
                current_sub_tokens.append(_lowerCamelCase )
        A__ = self.sp_model.decode(_lowerCamelCase )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__=None ):
        """Add the language-code prefix and eos around one (or a pair of) sequences."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]

    def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ):
        """Return a 0/1 mask marking special tokens in the built sequence."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
        A__ = [1] * len(self.prefix_tokens )
        A__ = [1]
        if token_ids_a is None:
            return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
        return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones

    def lowercase_ ( self ):
        """Return the full vocabulary (encoder plus added tokens)."""
        A__ = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        """Drop the unpicklable SentencePiece processor before pickling."""
        A__ = self.__dict__.copy()
        A__ = None
        return state

    def __setstate__( self , UpperCamelCase__ ):
        """Restore state and reload the SentencePiece model from spm_file."""
        A__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            A__ = {}
        A__ = load_spm(self.spm_file , self.sp_model_kwargs )

    def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
        """Write vocab.json and the SentencePiece model into `save_directory`."""
        A__ = Path(_lowerCamelCase )
        assert save_dir.is_dir(), f"""{save_directory} should be a directory"""
        A__ = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        A__ = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder , _lowerCamelCase )
        if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , _lowerCamelCase )
        elif not os.path.isfile(self.spm_file ):
            with open(_lowerCamelCase , "wb" ) as fi:
                A__ = self.sp_model.serialized_model_proto()
                fi.write(_lowerCamelCase )
        return (str(_lowerCamelCase ), str(_lowerCamelCase ))
def __a ( path , sp_model_kwargs ) -> Optional[int]:
    """Load a SentencePiece model from `path` with the given constructor kwargs.

    NOTE(review): the original declared two parameters both named ``A`` (a
    SyntaxError), expanded the undefined ``lowerCamelCase_`` and read the
    unbound local ``spm``; restored.  Also note all three helpers in this file
    are named ``__a``, so at module level each definition shadows the previous.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def __a ( path ) -> Union[str, Any]:
    """Read and parse a JSON file.

    NOTE(review): the original passed the undefined ``lowerCamelCase_`` to
    both ``open`` and ``json.load`` — and gave ``json.load`` the path instead
    of the open file handle; restored.
    """
    with open(path , "r" ) as f:
        return json.load(f )
def __a ( data , path ) -> List[Any]:
    """Serialize `data` as indented JSON to `path`.

    NOTE(review): the original declared two parameters both named ``A`` (a
    SyntaxError) and passed the undefined ``lowerCamelCase_`` for both the
    object and the file handle; restored.  A fused ``| 337 |`` extraction
    artifact was dropped from the final line.
    """
    with open(path , "w" ) as f:
        json.dump(data , f , indent=2 )
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__magic_name__ : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(
A , r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class UpperCamelCase_ ( A ):
"""simple docstring"""
def __A ( self : Any , _lowerCamelCase : GenericTensor ) -> np.ndarray:
if self.framework == "tf":
__magic_name__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__magic_name__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCamelCase )
else:
raise ValueError("Unsupported framework" )
return masked_index
def __A ( self : str , _lowerCamelCase : GenericTensor ) -> np.ndarray:
__magic_name__ = self.get_masked_index(_lowerCamelCase )
__magic_name__ = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
def __A ( self : int , _lowerCamelCase : GenericTensor ) -> Any:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_lowerCamelCase )
def __A ( self : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Any=None , **_lowerCamelCase : List[str] ) -> Dict[str, GenericTensor]:
if return_tensors is None:
__magic_name__ = self.framework
__magic_name__ = self.tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase )
self.ensure_exactly_one_mask_token(_lowerCamelCase )
return model_inputs
def __A ( self : List[str] , _lowerCamelCase : int ) -> List[Any]:
__magic_name__ = self.model(**_lowerCamelCase )
__magic_name__ = model_inputs["input_ids"]
return model_outputs
def __A ( self : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any]=5 , _lowerCamelCase : Dict=None ) -> Dict:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__magic_name__ = target_ids.shape[0]
__magic_name__ = model_outputs["input_ids"][0]
__magic_name__ = model_outputs["logits"]
if self.framework == "tf":
__magic_name__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__magic_name__ = outputs.numpy()
__magic_name__ = outputs[0, masked_index, :]
__magic_name__ = stable_softmax(_lowerCamelCase , axis=-1 )
if target_ids is not None:
__magic_name__ = tf.gather_nd(tf.squeeze(_lowerCamelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
__magic_name__ = tf.expand_dims(_lowerCamelCase , 0 )
__magic_name__ = tf.math.top_k(_lowerCamelCase , k=_lowerCamelCase )
__magic_name__ , __magic_name__ = topk.values.numpy(), topk.indices.numpy()
else:
__magic_name__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCamelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__magic_name__ = outputs[0, masked_index, :]
__magic_name__ = logits.softmax(dim=-1 )
if target_ids is not None:
__magic_name__ = probs[..., target_ids]
__magic_name__ , __magic_name__ = probs.topk(_lowerCamelCase )
__magic_name__ = []
__magic_name__ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__magic_name__ = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__magic_name__ = input_ids.numpy().copy()
if target_ids is not None:
__magic_name__ = target_ids[p].tolist()
__magic_name__ = p
# Filter padding out:
__magic_name__ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__magic_name__ = self.tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
__magic_name__ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(_lowerCamelCase )
result.append(_lowerCamelCase )
if single_mask:
return result[0]
return result
def __A ( self : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any]=None ) -> List[str]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__magic_name__ = [targets]
try:
__magic_name__ = self.tokenizer.get_vocab()
except Exception:
__magic_name__ = {}
__magic_name__ = []
for target in targets:
__magic_name__ = vocab.get(_lowerCamelCase , _lowerCamelCase )
if id_ is None:
__magic_name__ = self.tokenizer(
_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , max_length=1 , truncation=_lowerCamelCase , )["input_ids"]
if len(_lowerCamelCase ) == 0:
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
"We cannot replace it with anything meaningful, ignoring it" )
continue
__magic_name__ = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
__magic_name__ = list(set(_lowerCamelCase ) )
if len(_lowerCamelCase ) == 0:
raise ValueError("At least one target must be provided when passed." )
__magic_name__ = np.array(_lowerCamelCase )
return target_ids
def __A ( self : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : int=None ) -> Tuple:
__magic_name__ = {}
if targets is not None:
__magic_name__ = self.get_target_ids(_lowerCamelCase , _lowerCamelCase )
__magic_name__ = target_ids
if top_k is not None:
__magic_name__ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__( self : int , _lowerCamelCase : Any , *_lowerCamelCase : str , **_lowerCamelCase : int ) -> Optional[int]:
__magic_name__ = super().__call__(_lowerCamelCase , **_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) == 1:
return outputs[0]
return outputs
| 664 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCamelCase__ :
    """Test helper that builds a tiny MaskFormer config plus random dummy inputs.

    NOTE(review): every `__init__` parameter is named `UpperCamelCase__` and every
    method is named `_lowercase` — duplicate argument names do not parse, later
    method definitions shadow earlier ones, and annotated tuple targets like
    `a , b : Any = ...` are invalid syntax. The bodies read the intended names
    (`parent`, `batch_size`, `pixel_values`, ...); the signatures and assignment
    targets need restoring before this class can run.
    """

    def __init__( self , UpperCamelCase__ , UpperCamelCase__=2 , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=10 , UpperCamelCase__=3 , UpperCamelCase__=32 * 4 , UpperCamelCase__=32 * 6 , UpperCamelCase__=4 , UpperCamelCase__=32 , ) -> Any:
        # Stores the test hyper-parameters consumed by get_config() and the
        # create_and_check_* helpers below.
        lowerCamelCase : List[Any] = parent
        lowerCamelCase : Optional[int] = batch_size
        lowerCamelCase : Tuple = is_training
        lowerCamelCase : str = use_auxiliary_loss
        lowerCamelCase : Union[str, Any] = num_queries
        lowerCamelCase : Any = num_channels
        lowerCamelCase : str = min_size
        lowerCamelCase : Tuple = max_size
        lowerCamelCase : Optional[Any] = num_labels
        lowerCamelCase : int = mask_feature_size

    # Builds (config, pixel_values, pixel_mask, mask_labels, class_labels)
    # from random tensors placed on the test device.
    def _lowercase ( self ) -> Tuple:
        lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            _lowerCamelCase )
        lowerCamelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCamelCase )
        # Binary masks: uniform noise thresholded at 0.5, kept as floats.
        lowerCamelCase : int = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCamelCase ) > 0.5
        ).float()
        lowerCamelCase : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCamelCase ) > 0.5).long()
        lowerCamelCase : List[Any] = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    # Tiny Swin backbone + tiny DETR decoder sized from mask_feature_size.
    def _lowercase ( self ) -> Union[str, Any]:
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    # Variant returning (config, inputs_dict) for the common-test mixins.
    def _lowercase ( self ) -> List[str]:
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = self.prepare_config_and_inputs()
        lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
        lowerCamelCase : Union[str, Any] = output.encoder_hidden_states
        lowerCamelCase : Dict = output.pixel_decoder_hidden_states
        lowerCamelCase : Dict = output.transformer_decoder_hidden_states
        # NOTE(review): assertTrue takes (expr, msg) — the second argument here
        # is treated as a failure message, so no length comparison happens.
        self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(_lowerCamelCase ) , config.decoder_config.decoder_layers )

    # Runs MaskFormerModel forward (no grad) and checks output shapes/presence.
    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> int:
        with torch.no_grad():
            lowerCamelCase : List[str] = MaskFormerModel(config=_lowerCamelCase )
            model.to(_lowerCamelCase )
            model.eval()
            lowerCamelCase : Optional[int] = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
            lowerCamelCase : List[str] = model(_lowerCamelCase , output_hidden_states=_lowerCamelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(_lowerCamelCase , _lowerCamelCase )

    # Runs the segmentation head with and without labels; checks logits/loss.
    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
        lowerCamelCase : Union[str, Any] = MaskFormerForInstanceSegmentation(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()

        def comm_check_on_output(UpperCamelCase__ ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            lowerCamelCase : List[str] = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
            lowerCamelCase : int = model(_lowerCamelCase )
        comm_check_on_output(_lowerCamelCase )
        lowerCamelCase : int = model(
            pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
        comm_check_on_output(_lowerCamelCase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
    """Common + pipeline test suite for MaskFormer.

    NOTE(review): every test method below is named `_lowercase`, so in the class
    body each definition shadows the previous one and unittest discovers none of
    them (no `test_` prefix). The original `setUp`/`test_*` names need restoring.
    Many erased boolean literals appear as the placeholder `_lowerCamelCase`.
    """

    # Model classes and pipeline mapping consumed by the shared tester mixins.
    lowerCamelCase_ : int = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    lowerCamelCase_ : Tuple = (
        {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    lowerCamelCase_ : List[str] = False
    lowerCamelCase_ : Optional[Any] = False
    lowerCamelCase_ : Union[str, Any] = False
    lowerCamelCase_ : List[str] = False

    # setUp: build the model tester and a ConfigTester (no text modality).
    def _lowercase ( self ) -> Optional[int]:
        lowerCamelCase : Optional[int] = MaskFormerModelTester(self )
        lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )

    # Shared config round-trip checks.
    def _lowercase ( self ) -> Optional[int]:
        self.config_tester.run_common_tests()

    # Base-model forward-shape check.
    def _lowercase ( self ) -> int:
        lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )

    # Instance-segmentation-head check (logits shapes + loss).
    def _lowercase ( self ) -> Dict:
        lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_lowerCamelCase )

    @unittest.skip(reason="MaskFormer does not use inputs_embeds" )
    def _lowercase ( self ) -> Optional[int]:
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
    def _lowercase ( self ) -> Any:
        pass

    @unittest.skip(reason="MaskFormer is not a generative model" )
    def _lowercase ( self ) -> Dict:
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings" )
    def _lowercase ( self ) -> int:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def _lowercase ( self ) -> Union[str, Any]:
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def _lowercase ( self ) -> Tuple:
        pass

    # forward() signature check: first positional argument must be pixel_values.
    def _lowercase ( self ) -> Tuple:
        lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase : Any = model_class(_lowerCamelCase )
            lowerCamelCase : List[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
            lowerCamelCase : Optional[Any] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , _lowerCamelCase )

    # Slow: checkpoint loads from the hub.
    @slow
    def _lowercase ( self ) -> Optional[int]:
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            lowerCamelCase : Optional[Any] = MaskFormerModel.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )

    # Forward with labels on a default config must produce a loss.
    def _lowercase ( self ) -> List[str]:
        lowerCamelCase : List[str] = (self.model_tester.min_size,) * 2
        lowerCamelCase : Optional[Any] = {
            "pixel_values": torch.randn((2, 3, *size) , device=_lowerCamelCase ),
            "mask_labels": torch.randn((2, 10, *size) , device=_lowerCamelCase ),
            "class_labels": torch.zeros(2 , 10 , device=_lowerCamelCase ).long(),
        }
        lowerCamelCase : str = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_lowerCamelCase )
        lowerCamelCase : Tuple = model(**_lowerCamelCase )
        self.assertTrue(outputs.loss is not None )

    # Hidden-states-output variant of the base-model check.
    def _lowercase ( self ) -> List[str]:
        lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )

    # Attention outputs are populated when requested.
    def _lowercase ( self ) -> Optional[int]:
        lowerCamelCase , lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase : Dict = model_class(_lowerCamelCase ).to(_lowerCamelCase )
            lowerCamelCase : List[str] = model(**_lowerCamelCase , output_attentions=_lowerCamelCase )
            self.assertTrue(outputs.attentions is not None )

    # Training smoke test: loss backpropagates.
    def _lowercase ( self ) -> Optional[int]:
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCamelCase : Union[str, Any] = self.all_model_classes[1]
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        lowerCamelCase : int = model_class(_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.train()
        lowerCamelCase : Dict = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase ).loss
        loss.backward()

    # Gradients are retained on intermediate hidden states and attentions.
    def _lowercase ( self ) -> Any:
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCamelCase : Tuple = self.all_model_classes[1]
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
        lowerCamelCase : Optional[Any] = True
        lowerCamelCase : Tuple = True
        lowerCamelCase : List[str] = model_class(_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.train()
        lowerCamelCase : Optional[Any] = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
        lowerCamelCase : Optional[int] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        lowerCamelCase : Union[str, Any] = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        lowerCamelCase : int = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        lowerCamelCase : Dict = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=_lowerCamelCase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
SCREAMING_SNAKE_CASE__ : Any = 1E-4
def A():
    """Load the standard COCO cats fixture image used by the integration tests.

    Fix: the original assigned the opened image to a throwaway placeholder and
    then returned the undefined name ``image`` (NameError at call time); the
    ``-> str`` annotation was also wrong for a PIL image return.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_vision
@slow
class UpperCamelCase__ (unittest.TestCase ):
    """Slow integration tests running real MaskFormer checkpoints end-to-end.

    NOTE(review): all test methods are named `_lowercase` (each shadows the
    previous; none match unittest's `test_` discovery) and most tensor `.to()` /
    `atol` arguments are the erased placeholder `_lowerCamelCase` — presumably
    `torch_device` and the module-level 1E-4 tolerance; confirm when restoring.
    """

    @cached_property
    def _lowercase ( self ) -> Any:
        # Checkpoint image processor, or None when vision deps are unavailable.
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
            if is_vision_available()
            else None
        )

    # Base model: compare 3x3 corners of the three last-hidden-states against
    # recorded reference values.
    def _lowercase ( self ) -> int:
        lowerCamelCase : Dict = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(_lowerCamelCase )
        lowerCamelCase : Any = self.default_image_processor
        lowerCamelCase : Union[str, Any] = prepare_img()
        lowerCamelCase : Dict = image_processor(_lowerCamelCase , return_tensors="pt" ).to(_lowerCamelCase )
        lowerCamelCase : List[str] = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(_lowerCamelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            lowerCamelCase : List[Any] = model(**_lowerCamelCase )
        lowerCamelCase : List[str] = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_lowerCamelCase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
        lowerCamelCase : Optional[Any] = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_lowerCamelCase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
        lowerCamelCase : Dict = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_lowerCamelCase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )

    # Segmentation head (Swin checkpoint): mask + class logits against references.
    def _lowercase ( self ) -> str:
        lowerCamelCase : Tuple = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(_lowerCamelCase )
            .eval()
        )
        lowerCamelCase : Union[str, Any] = self.default_image_processor
        lowerCamelCase : str = prepare_img()
        lowerCamelCase : Dict = image_processor(_lowerCamelCase , return_tensors="pt" ).to(_lowerCamelCase )
        lowerCamelCase : List[Any] = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(_lowerCamelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            lowerCamelCase : List[str] = model(**_lowerCamelCase )
        # masks_queries_logits
        lowerCamelCase : str = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCamelCase : Tuple = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        lowerCamelCase : List[str] = torch.tensor(_lowerCamelCase ).to(_lowerCamelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
        # class_queries_logits
        lowerCamelCase : Optional[int] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCamelCase : int = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ] ).to(_lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )

    # Segmentation head (ResNet-101 COCO-stuff checkpoint): same logit checks.
    def _lowercase ( self ) -> Optional[Any]:
        lowerCamelCase : Union[str, Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
            .to(_lowerCamelCase )
            .eval()
        )
        lowerCamelCase : int = self.default_image_processor
        lowerCamelCase : Optional[int] = prepare_img()
        lowerCamelCase : Optional[Any] = image_processor(_lowerCamelCase , return_tensors="pt" ).to(_lowerCamelCase )
        lowerCamelCase : List[Any] = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(_lowerCamelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            lowerCamelCase : str = model(**_lowerCamelCase )
        # masks_queries_logits
        lowerCamelCase : Optional[Any] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCamelCase : Union[str, Any] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        lowerCamelCase : List[str] = torch.tensor(_lowerCamelCase ).to(_lowerCamelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
        # class_queries_logits
        lowerCamelCase : int = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCamelCase : List[str] = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )

    # Batched forward with segmentation maps: loss must be produced.
    def _lowercase ( self ) -> Any:
        lowerCamelCase : int = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(_lowerCamelCase )
            .eval()
        )
        lowerCamelCase : List[str] = self.default_image_processor
        lowerCamelCase : Any = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
        lowerCamelCase : Optional[Any] = inputs["pixel_values"].to(_lowerCamelCase )
        lowerCamelCase : int = [el.to(_lowerCamelCase ) for el in inputs["mask_labels"]]
        lowerCamelCase : List[str] = [el.to(_lowerCamelCase ) for el in inputs["class_labels"]]
        with torch.no_grad():
            lowerCamelCase : Union[str, Any] = model(**_lowerCamelCase )
        self.assertTrue(outputs.loss is not None )
| 311 |
"""Maximum sum of k consecutive elements of an array (sliding-window, O(n))."""
from __future__ import annotations

__all__ = ["__snake_case"]


def __snake_case(array: list[int], k: int) -> int:
    """Return the maximum sum of any ``k`` consecutive elements of ``array``.

    Starts from ``sum(array[:k])`` and slides the window one position at a
    time, subtracting the element that leaves and adding the one that enters.

    Raises:
        ValueError: if ``k`` is negative or larger than ``len(array)``.

    >>> __snake_case([1, 2, 3, 4, 5], 2)
    9

    Fix: the original declared both parameters under the same placeholder name
    (a SyntaxError) and assigned the running/maximum sums to one placeholder
    local while the loop read `current_sum` / `max_sum`.
    """
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input" )
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    # Fix: the original assigned both values to the same placeholder name and
    # called a nonexistent `max_sum_in_array`; restore `array`/`k` and call
    # __snake_case.
    array = [randint(-10_00, 10_00) for i in range(1_00)]
    k = randint(0, 1_10)
    # NOTE(review): k may exceed len(array) (110 > 100), in which case a
    # ValueError is raised — kept to match the original bounds.
    print(F'''The maximum sum of {k} consecutive elements is {__snake_case(array, k)}''')
| 664 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
    """Fast tests for the StableUnCLIP image-to-image pipeline on tiny models.

    NOTE(review): every class attribute is named `_A` (each shadows the
    previous, leaving only the last) and every method is named `_a` (same
    shadowing); the original `pipeline_class`/`params`/`test_*` names need
    restoring before this suite can run.
    """

    _A = StableUnCLIPImgaImgPipeline
    _A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    _A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _A = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _A = frozenset([])

    # Builds a dict of tiny, seeded pipeline components (CLIP encoders, UNet,
    # schedulers, VAE) so the pipeline runs in milliseconds.
    def _a (self ):
        lowerCamelCase = 32
        lowerCamelCase = embedder_hidden_size
        # image encoding components
        lowerCamelCase = CLIPImageProcessor(crop_size=32 , size=32 )
        torch.manual_seed(0 )
        lowerCamelCase = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=_lowerCamelCase , projection_dim=_lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
        # regular denoising components
        torch.manual_seed(0 )
        lowerCamelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowerCamelCase )
        lowerCamelCase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        lowerCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        lowerCamelCase = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
        torch.manual_seed(0 )
        lowerCamelCase = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCamelCase , layers_per_block=1 , upcast_attention=_lowerCamelCase , use_linear_projection=_lowerCamelCase , )
        torch.manual_seed(0 )
        lowerCamelCase = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=_lowerCamelCase , steps_offset=1 , )
        torch.manual_seed(0 )
        lowerCamelCase = AutoencoderKL()
        lowerCamelCase = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components

    # Builds deterministic call kwargs for the pipeline; optionally converts
    # the random input tensor to a PIL image.
    def _a (self , __a , __a=0 , __a=True ):
        if str(_lowerCamelCase ).startswith("mps" ):
            lowerCamelCase = torch.manual_seed(_lowerCamelCase )
        else:
            lowerCamelCase = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
        lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
        if pil_image:
            # Map from [-1, 1] to [0, 1], clamp, then convert to PIL.
            lowerCamelCase = input_image * 0.5 + 0.5
            lowerCamelCase = input_image.clamp(0 , 1 )
            lowerCamelCase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            lowerCamelCase = DiffusionPipeline.numpy_to_pil(_lowerCamelCase )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    # End-to-end CPU run: output shape and a reference 3x3 slice.
    @skip_mps
    def _a (self ):
        lowerCamelCase = "cpu"  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase = self.get_dummy_components()
        lowerCamelCase = StableUnCLIPImgaImgPipeline(**_lowerCamelCase )
        lowerCamelCase = sd_pipe.to(_lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
        lowerCamelCase = self.get_dummy_inputs(_lowerCamelCase )
        inputs.update({"image_embeds": None} )
        lowerCamelCase = sd_pipe(**_lowerCamelCase ).images
        lowerCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCamelCase = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def _a (self ):
        # Exact-difference check only on CPU-like devices.
        lowerCamelCase = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=_lowerCamelCase )

    def _a (self ):
        # Exact-difference check only on CPU-like devices.
        lowerCamelCase = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=_lowerCamelCase )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def _a (self ):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_lowerCamelCase )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase):
    """GPU integration tests for StableUnCLIP img2img against hosted references.

    NOTE(review): all methods are named `_a` (each shadows the previous; none
    match unittest discovery). The first body calls super().tearDown(), so it
    is presumably the original tearDown — confirm when restoring names.
    """

    def _a (self ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # 2-1-L checkpoint: compare generated image to a stored reference array.
    def _a (self ):
        lowerCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        lowerCamelCase = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
        lowerCamelCase = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
        pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase = torch.Generator(device="cpu" ).manual_seed(0 )
        lowerCamelCase = pipe(_lowerCamelCase , "anime turle" , generator=_lowerCamelCase , output_type="np" )
        lowerCamelCase = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )

    # 2-1-H checkpoint: same comparison against its reference array.
    def _a (self ):
        lowerCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        lowerCamelCase = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
        lowerCamelCase = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
        pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase = torch.Generator(device="cpu" ).manual_seed(0 )
        lowerCamelCase = pipe(_lowerCamelCase , "anime turle" , generator=_lowerCamelCase , output_type="np" )
        lowerCamelCase = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )

    # Memory budget: peak allocation with offload + slicing must stay < 7 GB.
    def _a (self ):
        lowerCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCamelCase = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
        lowerCamelCase = pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase = pipe(
            _lowerCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
        lowerCamelCase = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module globals are assigned to the same placeholder name
# `__magic_name__`, so the logger is immediately shadowed by the (empty)
# pretrained-config archive map — the intended names need restoring.
__magic_name__ : int =logging.get_logger(__name__)
__magic_name__ : List[Any] ={}
class UpperCamelCase_ ( A ):
    """Configuration class for LLaMA models.

    Stores vocabulary/architecture sizes, generation-cache and RoPE-scaling
    options, and validates the ``rope_scaling`` dict on construction.

    Fix: the original repeated one placeholder name for every ``__init__``
    parameter (a SyntaxError), assigned every attribute to one placeholder
    local, and shadowed the first class attribute with the second; the
    canonical LlamaConfig names/defaults are restored below.
    """

    # Registered model type and keys PretrainedConfig drops at inference.
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__(
        self,
        vocab_size=3_20_00,
        hidden_size=40_96,
        intermediate_size=1_10_08,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=20_48,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation(self):
        """Validate `rope_scaling`: a 2-key dict {"type": "linear"|"dynamic", "factor": float > 1}.

        Renamed from the obfuscated `__A` to match the call in `__init__`.
        The error message is fixed to name the `type` field the code actually
        reads (the original said "with with two fields, `name` and `factor`").
        """
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
| 664 | 0 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__lowerCamelCase : List[Any] = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
    """CoNLL-style NER token-classification task: read InputExamples from a
    one-token-per-line file, write predictions, and provide the label set.

    NOTE(review): this block looks machine-mangled.  Parameter names do not
    match the names the bodies read (``__A`` vs ``label_idx``/``mode``/
    ``path``), one signature repeats ``__A`` twice (a SyntaxError), and all
    three methods share the name ``_lowercase`` so later defs shadow earlier
    ones.  Restore from the upstream token-classification ``tasks.py``
    before relying on this class.
    """

    def __init__( self : Optional[Any] , __A : str=-1 ):
        # in NER datasets, the last column is usually reserved for NER label
        # NOTE(review): binds a local, not ``self.label_idx`` — presumably
        # ``self.label_idx = label_idx`` upstream; TODO confirm.
        snake_case__ : List[str] = label_idx

    def _lowercase ( self : Any , __A : str , __A : Union[Split, str] ):
        # Read examples: blank lines / "-DOCSTART-" separate sentences,
        # first column is the token, column ``label_idx`` the label.
        if isinstance(_lowerCamelCase , _lowerCamelCase ):
            snake_case__ : Any = mode.value
        snake_case__ : List[Any] = os.path.join(_lowerCamelCase , f'''{mode}.txt''' )
        snake_case__ : Dict = 1
        snake_case__ : List[str] = []
        with open(_lowerCamelCase , encoding="utf-8" ) as f:
            snake_case__ : Dict = []
            snake_case__ : Optional[Any] = []
            for line in f:
                if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
                        guid_index += 1
                    snake_case__ : str = []
                    snake_case__ : Dict = []
                else:
                    snake_case__ : Any = line.split(" " )
                    words.append(splits[0] )
                    if len(_lowerCamelCase ) > 1:
                        labels.append(splits[self.label_idx].replace("\n" , "" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O" )
            if words:
                examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
        return examples

    def _lowercase ( self : Optional[Any] , __A : TextIO , __A : TextIO , __A : List ):
        # Write per-token predictions next to the original test-file lines;
        # document separators are copied through unchanged.
        snake_case__ : Optional[int] = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                writer.write(_lowerCamelCase )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                snake_case__ : List[Any] = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
                writer.write(_lowerCamelCase )
            else:
                # Token beyond the model's max sequence length: no prediction.
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )

    def _lowercase ( self : Tuple , __A : str ):
        # Load the label list from *path* (always prepending "O"),
        # defaulting to the CoNLL-2003 NER tag set.
        if path:
            with open(_lowerCamelCase , "r" ) as f:
                snake_case__ : Any = f.read().splitlines()
            if "O" not in labels:
                snake_case__ : Union[str, Any] = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
    """Chunking variant of the NER task: the label lives in the
    second-to-last column, and the default label set is CoNLL-2000 chunks.

    NOTE(review): mangled — ``_lowercase`` reads ``path`` but its parameter
    is named ``__A``, and this class shadows the identically named class
    above it.  Verify against the upstream ``tasks.py`` before use.
    """

    def __init__( self : int ):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2 )

    def _lowercase ( self : int , __A : str ):
        # Load the chunk label list from *path*, defaulting to CoNLL-2000 tags.
        if path:
            with open(_lowerCamelCase , "r" ) as f:
                snake_case__ : str = f.read().splitlines()
            if "O" not in labels:
                snake_case__ : Optional[int] = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
    """POS-tagging task over CoNLL-U files (parsed with ``conllu.parse_incr``):
    tokens come from the ``form`` field, labels from ``upos``.

    NOTE(review): mangled — parameter names (``__A``) do not match the names
    the bodies read, and both methods share the name ``_lowercase`` so the
    second shadows the first.  Verify against the upstream ``tasks.py``.
    """

    def _lowercase ( self : List[Any] , __A : Union[str, Any] , __A : Union[Split, str] ):
        # Read one InputExample per CoNLL-U sentence.
        if isinstance(_lowerCamelCase , _lowerCamelCase ):
            snake_case__ : Dict = mode.value
        snake_case__ : Union[str, Any] = os.path.join(_lowerCamelCase , f'''{mode}.txt''' )
        snake_case__ : Union[str, Any] = 1
        snake_case__ : List[Any] = []
        with open(_lowerCamelCase , encoding="utf-8" ) as f:
            for sentence in parse_incr(_lowerCamelCase ):
                snake_case__ : str = []
                snake_case__ : str = []
                for token in sentence:
                    words.append(token["form"] )
                    labels.append(token["upos"] )
                # Every token must have a UPOS label.
                assert len(_lowerCamelCase ) == len(_lowerCamelCase )
                if words:
                    examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
                    guid_index += 1
        return examples

    def _lowercase ( self : Optional[int] , __A : TextIO , __A : TextIO , __A : List ):
        # Write "form (upos|prediction)" tuples, one sentence per line.
        snake_case__ : int = 0
        for sentence in parse_incr(_lowerCamelCase ):
            snake_case__ : List[Any] = preds_list[example_id]
            snake_case__ : Optional[Any] = ""
            for token in sentence:
                out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(_lowerCamelCase )
            example_id += 1

    def _lowercase ( self : Dict , __A : str ):
        # Load the label list from *path*, defaulting to the 17 UPOS tags.
        if path:
            with open(_lowerCamelCase , "r" ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 297 |
'''simple docstring'''
# Universal molar gas constant R.  Unit: J mol^-1 K^-1.
# (The obfuscated source bound this to `__magic_name__` while both functions
# below read `UNIVERSAL_GAS_CONSTANT`; the name is restored from the usage.)
UNIVERSAL_GAS_CONSTANT = 8.314462


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure of an ideal gas system: P = nRT / V.

    Args:
        moles: amount of substance n (mol), must be non-negative.
        kelvin: absolute temperature T (K), must be non-negative.
        volume: volume V (m^3), must be non-negative.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume of an ideal gas system: V = nRT / P.

    Args:
        moles: amount of substance n (mol), must be non-negative.
        kelvin: absolute temperature T (K), must be non-negative.
        pressure: pressure P (Pa), must be non-negative.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 664 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQ model's ``encode`` method.

    Attributes:
        latents: the encoded (pre-quantization) latent sample.

    Restored from the obfuscated ``class _lowercase(snake_case_): lowercase = 42``:
    the model class below returns ``VQEncoderOutput(latents=...)``, and the
    file imports ``BaseOutput``, fixing both the class name and the base.
    """

    latents: torch.FloatTensor
class _lowercase(ModelMixin, ConfigMixin):
    """A VQ-VAE model for encoding images into discrete latents and decoding
    latent representations back into images.

    Repairs applied to the obfuscated original:
    - base classes restored to ``ModelMixin, ConfigMixin`` (both imported above);
    - ``__init__`` parameters were all named ``snake_case`` (a SyntaxError) —
      names restored from the attributes the body assigns;
    - ``nn.Convad`` (nonexistent) -> ``nn.Conv2d``;
    - the three colliding method names restored to ``encode``/``decode``/
      ``forward``, which is what ``forward`` itself calls (``self.encode`` /
      ``self.decode`` in the original body).
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        # Quantizer operates in vq_embed_dim; default to the latent width.
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(
            num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False
        )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        """Encode *x* into (pre-quantization) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Quantize *h* (unless ``force_not_quantize``) and decode to a sample."""
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant_post = self.post_quant_conv(quant)
        # Spatial norm decoders additionally condition on the quantized latents.
        dec = self.decoder(quant_post, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self, sample: torch.FloatTensor, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Full encode -> quantize -> decode round trip of *sample*."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
| 417 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__magic_name__ : List[Any] =logging.getLogger(__name__)
class UpperCamelCase_ ( A ):
    """CoNLL-style NER token-classification task (duplicate of the class
    earlier in this file, obfuscated differently).

    NOTE(review): mangled — parameter names (``_lowerCamelCase``) do not
    match the names the bodies read (``label_idx``/``mode``/``path``), and
    assignments bind the throwaway local ``__magic_name__`` instead of the
    variables later code reads.  Restore from the upstream ``tasks.py``.
    """

    def __init__( self : Optional[Any] , _lowerCamelCase : str=-1 ) -> List[str]:
        # in NER datasets, the last column is usually reserved for NER label
        # NOTE(review): presumably ``self.label_idx = label_idx`` upstream.
        __magic_name__ = label_idx

    def __A ( self : Any , _lowerCamelCase : str , _lowerCamelCase : Union[Split, str] ) -> List[InputExample]:
        # Read examples: blank lines / "-DOCSTART-" separate sentences.
        if isinstance(_lowerCamelCase , _lowerCamelCase ):
            __magic_name__ = mode.value
        __magic_name__ = os.path.join(_lowerCamelCase , f'{mode}.txt' )
        __magic_name__ = 1
        __magic_name__ = []
        with open(_lowerCamelCase , encoding="utf-8" ) as f:
            __magic_name__ = []
            __magic_name__ = []
            for line in f:
                if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
                        guid_index += 1
                    __magic_name__ = []
                    __magic_name__ = []
                else:
                    __magic_name__ = line.split(" " )
                    words.append(splits[0] )
                    if len(_lowerCamelCase ) > 1:
                        labels.append(splits[self.label_idx].replace("\n" , "" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O" )
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
        return examples

    def __A ( self : Optional[Any] , _lowerCamelCase : TextIO , _lowerCamelCase : TextIO , _lowerCamelCase : List ) -> Union[str, Any]:
        # Write per-token predictions next to the original test-file lines.
        __magic_name__ = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                writer.write(_lowerCamelCase )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                __magic_name__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
                writer.write(_lowerCamelCase )
            else:
                # Token beyond the model's max sequence length: no prediction.
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )

    def __A ( self : Tuple , _lowerCamelCase : str ) -> List[str]:
        # Load the label list from *path* (always prepending "O"),
        # defaulting to the CoNLL-2003 NER tag set.
        if path:
            with open(_lowerCamelCase , "r" ) as f:
                __magic_name__ = f.read().splitlines()
            if "O" not in labels:
                __magic_name__ = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase_ ( A ):
    """Chunking variant of the NER task above: label in the second-to-last
    column; default labels are CoNLL-2000 chunk tags.

    NOTE(review): mangled — ``__A`` reads ``path`` but its parameter is
    named ``_lowerCamelCase``; this class also shadows the identically
    named class directly above it.
    """

    def __init__( self : int ) -> str:
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2 )

    def __A ( self : int , _lowerCamelCase : str ) -> List[str]:
        # Load the chunk label list from *path*, defaulting to CoNLL-2000 tags.
        if path:
            with open(_lowerCamelCase , "r" ) as f:
                __magic_name__ = f.read().splitlines()
            if "O" not in labels:
                __magic_name__ = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class UpperCamelCase_ ( A ):
    """POS-tagging task over CoNLL-U files (parsed with ``conllu.parse_incr``):
    tokens from the ``form`` field, labels from ``upos``.

    NOTE(review): mangled — parameter names (``_lowerCamelCase``) do not
    match the names the bodies read; assignments bind the throwaway local
    ``__magic_name__``.  Restore from the upstream ``tasks.py``.
    """

    def __A ( self : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[Split, str] ) -> List[InputExample]:
        # Read one InputExample per CoNLL-U sentence.
        if isinstance(_lowerCamelCase , _lowerCamelCase ):
            __magic_name__ = mode.value
        __magic_name__ = os.path.join(_lowerCamelCase , f'{mode}.txt' )
        __magic_name__ = 1
        __magic_name__ = []
        with open(_lowerCamelCase , encoding="utf-8" ) as f:
            for sentence in parse_incr(_lowerCamelCase ):
                __magic_name__ = []
                __magic_name__ = []
                for token in sentence:
                    words.append(token["form"] )
                    labels.append(token["upos"] )
                # Every token must carry a UPOS label.
                assert len(_lowerCamelCase ) == len(_lowerCamelCase )
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
                    guid_index += 1
        return examples

    def __A ( self : Optional[int] , _lowerCamelCase : TextIO , _lowerCamelCase : TextIO , _lowerCamelCase : List ) -> Any:
        # Write "form (upos|prediction)" tuples, one sentence per line.
        __magic_name__ = 0
        for sentence in parse_incr(_lowerCamelCase ):
            __magic_name__ = preds_list[example_id]
            __magic_name__ = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(_lowerCamelCase )
            example_id += 1

    def __A ( self : Dict , _lowerCamelCase : str ) -> List[str]:
        # Load the label list from *path*, defaulting to the 17 UPOS tags.
        if path:
            with open(_lowerCamelCase , "r" ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 664 | 0 |
'''simple docstring'''
import string
import numpy
def _UpperCAmelCase(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` (recursive Euclid).

    The original recursed via the undefined names ``greatest_common_divisor``
    and ``lowerCamelCase_``; the recursion now calls this function itself
    with the standard Euclid step ``(b % a, a)``.
    """
    return b if a == 0 else _UpperCAmelCase(b % a, a)
class a_ :
    """Hill cipher over the 36-character alphabet A-Z0-9.

    NOTE(review): this block looks machine-mangled.  The three class
    attributes all share the name ``__lowerCAmelCase`` (later ones shadow
    earlier), the modulus lambda reads an undefined ``x``, and method
    parameters (``snake_case_``) do not match the names the bodies read
    (``encrypt_key``/``letter``/``num``/``text``).  Restore from the
    upstream Hill-cipher implementation before relying on it.
    """

    # Presumably ``key_string`` upstream — the methods index ``self.key_string``.
    __lowerCAmelCase : str = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    __lowerCAmelCase : List[str] = numpy.vectorize(lambda _a : x % 3_6 )

    __lowerCAmelCase : Optional[int] = numpy.vectorize(_a )

    def __init__( self , snake_case_ ):
        # Reduce the key mod 36 and validate its determinant before use.
        _lowerCAmelCase : List[Any] = self.modulus(_lowerCamelCase )  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        _lowerCAmelCase : Union[str, Any] = encrypt_key.shape[0]

    def __UpperCamelCase ( self , snake_case_ ):
        # Letter -> index in the 36-character alphabet.
        return self.key_string.index(_lowerCamelCase )

    def __UpperCamelCase ( self , snake_case_ ):
        # Index (possibly float) -> letter, rounding to nearest.
        return self.key_string[round(_lowerCamelCase )]

    def __UpperCamelCase ( self ):
        # The key is invertible mod 36 iff gcd(det, 36) == 1.
        _lowerCAmelCase : List[str] = round(numpy.linalg.det(self.encrypt_key ) )

        if det < 0:
            _lowerCAmelCase : Any = det % len(self.key_string )

        _lowerCAmelCase : List[str] = len(self.key_string )
        if greatest_common_divisor(_lowerCamelCase , len(self.key_string ) ) != 1:
            _lowerCAmelCase : Union[str, Any] = (
                f'determinant modular {req_l} of encryption key({det}) '
                f'is not co prime w.r.t {req_l}.\nTry another key.'
            )
            raise ValueError(_lowerCamelCase )

    def __UpperCamelCase ( self , snake_case_ ):
        # Keep only alphabet characters and pad to a multiple of break_key.
        _lowerCAmelCase : Tuple = [char for char in text.upper() if char in self.key_string]

        _lowerCAmelCase : int = chars[-1]
        while len(_lowerCamelCase ) % self.break_key != 0:
            chars.append(_lowerCamelCase )

        return "".join(_lowerCamelCase )

    def __UpperCamelCase ( self , snake_case_ ):
        # Encrypt block-by-block: c = K @ p (mod 36).
        _lowerCAmelCase : Union[str, Any] = self.process_text(text.upper() )
        _lowerCAmelCase : str = """"""

        for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ):
            _lowerCAmelCase : Optional[Any] = text[i : i + self.break_key]
            _lowerCAmelCase : Optional[Any] = [self.replace_letters(_lowerCamelCase ) for char in batch]
            _lowerCAmelCase : Dict = numpy.array([vec] ).T
            _lowerCAmelCase : Any = self.modulus(self.encrypt_key.dot(_lowerCamelCase ) ).T.tolist()[
                0
            ]
            _lowerCAmelCase : Tuple = """""".join(
                self.replace_digits(_lowerCamelCase ) for num in batch_encrypted )
            encrypted += encrypted_batch

        return encrypted

    def __UpperCamelCase ( self ):
        # Build the modular inverse of the key: det^-1 * adj(K) (mod 36),
        # finding det^-1 by brute force over 0..35.
        _lowerCAmelCase : Dict = round(numpy.linalg.det(self.encrypt_key ) )

        if det < 0:
            _lowerCAmelCase : List[str] = det % len(self.key_string )
        _lowerCAmelCase : List[Any] = None
        for i in range(len(self.key_string ) ):
            if (det * i) % len(self.key_string ) == 1:
                _lowerCAmelCase : str = i
                break

        _lowerCAmelCase : Optional[Any] = (
            det_inv
            * numpy.linalg.det(self.encrypt_key )
            * numpy.linalg.inv(self.encrypt_key )
        )

        return self.to_int(self.modulus(_lowerCamelCase ) )

    def __UpperCamelCase ( self , snake_case_ ):
        # Decrypt block-by-block: p = K^-1 @ c (mod 36).
        _lowerCAmelCase : Any = self.make_decrypt_key()
        _lowerCAmelCase : int = self.process_text(text.upper() )
        _lowerCAmelCase : int = """"""

        for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ):
            _lowerCAmelCase : int = text[i : i + self.break_key]
            _lowerCAmelCase : str = [self.replace_letters(_lowerCamelCase ) for char in batch]
            _lowerCAmelCase : int = numpy.array([vec] ).T
            _lowerCAmelCase : Tuple = self.modulus(decrypt_key.dot(_lowerCamelCase ) ).T.tolist()[0]
            _lowerCAmelCase : Any = """""".join(
                self.replace_digits(_lowerCamelCase ) for num in batch_decrypted )
            decrypted += decrypted_batch

        return decrypted
def _UpperCAmelCase( ) -> int:
    """Interactive driver: read a key matrix from stdin, then encrypt or
    decrypt user-supplied text with the Hill cipher.

    NOTE(review): mangled — this definition shadows the gcd helper of the
    same name above, and the `__main__` guard below originally called an
    undefined ``main()``; presumably this function was named ``main``.
    """
    # Read the N x N key matrix row by row.
    _lowerCAmelCase : Optional[Any] = int(input("""Enter the order of the encryption key: """ ) )
    _lowerCAmelCase : List[Any] = []

    print("""Enter each row of the encryption key with space separated integers""" )
    for _ in range(lowerCamelCase_ ):
        _lowerCAmelCase : Dict = [int(lowerCamelCase_ ) for x in input().split()]
        hill_matrix.append(lowerCamelCase_ )

    _lowerCAmelCase : int = HillCipher(numpy.array(lowerCamelCase_ ) )

    print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
    _lowerCAmelCase : Tuple = input("""\n1. Encrypt\n2. Decrypt\n""" )
    if option == "1":
        _lowerCAmelCase : List[Any] = input("""What text would you like to encrypt?: """ )
        print("""Your encrypted text is:""" )
        print(hc.encrypt(lowerCamelCase_ ) )
    elif option == "2":
        _lowerCAmelCase : str = input("""What text would you like to decrypt?: """ )
        print("""Your decrypted text is:""" )
        print(hc.decrypt(lowerCamelCase_ ) )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The original called an undefined `main()`; the interactive entry point
    # in this module is the second `_UpperCAmelCase` definition above.
    _UpperCAmelCase()
| 384 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCamelCase_ :
    """Dense matrix with elementwise +/-, scalar and matrix multiplication,
    transpose, and a Sherman-Morrison rank-1 inverse update.

    NOTE(review): mangled — ``__init__`` repeats the parameter name
    ``_lowerCamelCase`` three times (a SyntaxError) while the body reads
    ``row``/``column``/``default_value``, and assignments bind the throwaway
    local ``__magic_name__`` rather than attributes.  Restore from the
    upstream Sherman-Morrison implementation before relying on it.
    """

    def __init__( self : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0 ) -> None:
        # row x column matrix filled with default_value.
        __magic_name__ , __magic_name__ = row, column
        __magic_name__ = [[default_value for c in range(_lowerCamelCase )] for r in range(_lowerCamelCase )]

    def __str__( self : Optional[Any] ) -> str:
        # Render all elements right-aligned to the widest element's width.
        __magic_name__ = f'Matrix consist of {self.row} rows and {self.column} columns\n'

        # Make string identifier
        __magic_name__ = 0
        for row_vector in self.array:
            for obj in row_vector:
                __magic_name__ = max(_lowerCamelCase , len(str(_lowerCamelCase ) ) )
        __magic_name__ = f'%{max_element_length}s'

        # Make string and return
        def single_line(_lowerCamelCase : list[float] ) -> str:
            nonlocal string_format_identifier
            __magic_name__ = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line

        s += "\n".join(single_line(_lowerCamelCase ) for row_vector in self.array )
        return s

    def __repr__( self : Optional[int] ) -> str:
        return str(self )

    def __A ( self : Optional[Any] , _lowerCamelCase : tuple[int, int] ) -> bool:
        # True iff loc is a 2-tuple/list of in-range (row, column) indices.
        if not (isinstance(_lowerCamelCase , (list, tuple) ) and len(_lowerCamelCase ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__( self : Optional[int] , _lowerCamelCase : tuple[int, int] ) -> Any:
        assert self.validate_indicies(_lowerCamelCase )
        return self.array[loc[0]][loc[1]]

    def __setitem__( self : Tuple , _lowerCamelCase : tuple[int, int] , _lowerCamelCase : float ) -> None:
        assert self.validate_indicies(_lowerCamelCase )
        __magic_name__ = value

    def __add__( self : Union[str, Any] , _lowerCamelCase : Matrix ) -> Matrix:
        # Elementwise sum; shapes must match.
        assert isinstance(_lowerCamelCase , _lowerCamelCase )
        assert self.row == another.row and self.column == another.column

        # Add
        __magic_name__ = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                __magic_name__ = self[r, c] + another[r, c]
        return result

    def __neg__( self : int ) -> Matrix:
        __magic_name__ = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                __magic_name__ = -self[r, c]
        return result

    def __sub__( self : Optional[int] , _lowerCamelCase : Matrix ) -> Matrix:
        return self + (-another)

    def __mul__( self : Optional[int] , _lowerCamelCase : int | float | Matrix ) -> Matrix:
        # Scalar or matrix product depending on the operand's type.
        if isinstance(_lowerCamelCase , (int, float) ):  # Scalar multiplication
            __magic_name__ = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    __magic_name__ = self[r, c] * another
            return result
        elif isinstance(_lowerCamelCase , _lowerCamelCase ):  # Matrix multiplication
            assert self.column == another.row
            __magic_name__ = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            __magic_name__ = f'Unsupported type given for another ({type(_lowerCamelCase )})'
            raise TypeError(_lowerCamelCase )

    def __A ( self : Optional[int] ) -> Matrix:
        # Transpose.
        __magic_name__ = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                __magic_name__ = self[r, c]
        return result

    def __A ( self : int , _lowerCamelCase : Matrix , _lowerCamelCase : Matrix ) -> Any:
        # Sherman-Morrison: (A + u v^T)^-1, given self = A^-1 and column
        # vectors u, v.  Returns None when 1 + v^T A^-1 u == 0 (singular).
        assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        __magic_name__ = v.transpose()
        __magic_name__ = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable

        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
# NOTE(review): this self-test section is mangled — ``ainv``, ``u``, ``v``
# and ``testa`` are read but never bound (assignments go to the throwaway
# ``__magic_name__``), so it cannot run as-is.  Restore from upstream.
if __name__ == "__main__":

    def __snake_case ( ):
        """Exercise Matrix and the Sherman-Morrison update (broken, see NOTE)."""
        __magic_name__ = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            # presumably ``ainv[i, i] = 1`` upstream (identity as A^-1)
            __magic_name__ = 1
        print(F'a^(-1) is {ainv}' )
        # u, v
        __magic_name__ = Matrix(3 , 1 , 0 )
        __magic_name__ , __magic_name__ , __magic_name__ = 1, 2, -3
        __magic_name__ = Matrix(3 , 1 , 0 )
        __magic_name__ , __magic_name__ , __magic_name__ = 4, -2, 5
        print(F'u is {u}' )
        print(F'v is {v}' )
        print(F'uv^T is {u * v.transpose()}' )
        # Sherman Morrison
        print(F'(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCamelCase_ , lowerCamelCase_ )}' )

    def __snake_case ( ):
        """Run the module's doctests (shadows the test function above)."""
        import doctest

        doctest.testmod()

    # presumably the first test function, renamed by obfuscation
    testa()
| 664 | 0 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# Hard version pins: the conversion below relies on gluonnlp 0.8.3 / mxnet 1.5.0
# internals and fails fast on anything else.
if version.parse(nlp.__version__) != version.parse('0.8.3'):
    raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
    raise Exception('requires mxnet == 1.5.0')

logging.set_verbosity_info()
# NOTE(review): obfuscation collapsed distinct module globals into ``a`` —
# the logger bound here is immediately clobbered by the sample sentence
# (presumably ``logger`` and ``SAMPLE_TEXT`` upstream).
a = logging.get_logger(__name__)
a = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def lowercase (snake_case__ : str , snake_case__ : str ) -> Any:
    """Convert an official Gluon Bort checkpoint into a Hugging Face
    ``BertForMaskedLM`` checkpoint and verify the two produce matching
    outputs on a sample sentence.

    NOTE(review): mangled — both parameters share the name ``snake_case__``
    (a SyntaxError; presumably ``bort_checkpoint_path`` and
    ``pytorch_dump_folder_path``), and the body references the undefined
    ``lowerCamelCase_`` where those paths and local temporaries belong.
    Restore from the upstream ``convert_bort_original_gluonnlp_checkpoint_to_pytorch.py``
    before relying on it.
    """
    # Bort's fixed 4-layer / 1024-unit / 8-head hyper-parameters.
    lowerCAmelCase = {
        """attention_cell""": """multi_head""",
        """num_layers""": 4,
        """units""": 1_024,
        """hidden_size""": 768,
        """max_length""": 512,
        """num_heads""": 8,
        """scaled""": True,
        """dropout""": 0.1,
        """use_residual""": True,
        """embed_size""": 1_024,
        """embed_dropout""": 0.1,
        """word_embed""": None,
        """layer_norm_eps""": 1e-5,
        """token_type_vocab_size""": 2,
    }

    lowerCAmelCase = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    lowerCAmelCase = BERTEncoder(
        attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCamelCase_ , output_all_encodings=lowerCamelCase_ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCamelCase_ ) , )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    lowerCAmelCase = """openwebtext_ccnews_stories_books_cased"""

    # Specify download folder to Gluonnlp's vocab
    lowerCAmelCase = os.path.join(get_home_dir() , """models""" )
    lowerCAmelCase = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=lowerCamelCase_ )

    lowerCAmelCase = nlp.model.BERTModel(
        lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , )

    original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ )
    lowerCAmelCase = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    lowerCAmelCase = {
        """architectures""": ["""BertForMaskedLM"""],
        """attention_probs_dropout_prob""": predefined_args["""dropout"""],
        """hidden_act""": """gelu""",
        """hidden_dropout_prob""": predefined_args["""dropout"""],
        """hidden_size""": predefined_args["""embed_size"""],
        """initializer_range""": 0.02,
        """intermediate_size""": predefined_args["""hidden_size"""],
        """layer_norm_eps""": predefined_args["""layer_norm_eps"""],
        """max_position_embeddings""": predefined_args["""max_length"""],
        """model_type""": """bort""",
        """num_attention_heads""": predefined_args["""num_heads"""],
        """num_hidden_layers""": predefined_args["""num_layers"""],
        """pad_token_id""": 1,  # 2 = BERT, 1 = RoBERTa
        """type_vocab_size""": 1,  # 2 = BERT, 1 = RoBERTa
        """vocab_size""": len(lowerCamelCase_ ),
    }

    lowerCAmelCase = BertConfig.from_dict(lowerCamelCase_ )
    lowerCAmelCase = BertForMaskedLM(lowerCamelCase_ )
    hf_bort_model.eval()

    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter                                                | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta`                                      | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma`                                     | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight`                                      | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight`                                          | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias`     | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight`   | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias`   | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias`   | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias`                   | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight`                 | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta`                  | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma`                 | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias`                   | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight`                 | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta`              | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma`             | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias`                        | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight`                      | `bert.encoder.layer.*.output.dense.weight`

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(snake_case__ : Any ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )

    # Check param shapes and map new HF param back
    def check_and_map_params(snake_case__ : Optional[int] , snake_case__ : int ):
        # NOTE(review): duplicate parameter names here too (presumably
        # ``hf_param`` and ``gluon_param`` upstream).
        lowerCAmelCase = hf_param.shape

        lowerCAmelCase = to_torch(params[gluon_param] )
        lowerCAmelCase = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''

        return gluon_param

    lowerCAmelCase = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" )
    lowerCAmelCase = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" )
    lowerCAmelCase = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" )
    lowerCAmelCase = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    lowerCAmelCase = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )

    for i in range(hf_bort_config.num_hidden_layers ):
        lowerCAmelCase = hf_bort_model.bert.encoder.layer[i]

        # self attention
        lowerCAmelCase = layer.attention.self

        lowerCAmelCase = check_and_map_params(
            self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
        lowerCAmelCase = check_and_map_params(
            self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
        lowerCAmelCase = check_and_map_params(
            self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
        lowerCAmelCase = check_and_map_params(
            self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
        lowerCAmelCase = check_and_map_params(
            self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
        lowerCAmelCase = check_and_map_params(
            self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )

        # self attention output
        lowerCAmelCase = layer.attention.output

        lowerCAmelCase = check_and_map_params(
            self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
        lowerCAmelCase = check_and_map_params(
            self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
        lowerCAmelCase = check_and_map_params(
            self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
        lowerCAmelCase = check_and_map_params(
            self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )

        # intermediate
        lowerCAmelCase = layer.intermediate

        lowerCAmelCase = check_and_map_params(
            intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
        lowerCAmelCase = check_and_map_params(
            intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )

        # output
        lowerCAmelCase = layer.output

        lowerCAmelCase = check_and_map_params(
            bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
        lowerCAmelCase = check_and_map_params(
            bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
        lowerCAmelCase = check_and_map_params(
            bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
        lowerCAmelCase = check_and_map_params(
            bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    lowerCAmelCase = RobertaTokenizer.from_pretrained("""roberta-base""" )

    lowerCAmelCase = tokenizer.encode_plus(lowerCamelCase_ )["""input_ids"""]

    # Get gluon output
    lowerCAmelCase = mx.nd.array([input_ids] )
    lowerCAmelCase = original_bort(inputs=lowerCamelCase_ , token_types=[] )

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(lowerCamelCase_ )
    lowerCAmelCase = BertModel.from_pretrained(lowerCamelCase_ )
    hf_bort_model.eval()

    lowerCAmelCase = tokenizer.encode_plus(lowerCamelCase_ , return_tensors="""pt""" )
    lowerCAmelCase = hf_bort_model(**lowerCamelCase_ )[0]

    lowerCAmelCase = output_gluon[0].asnumpy()
    lowerCAmelCase = output_hf[0].detach().numpy()

    lowerCAmelCase = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    lowerCAmelCase = np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )

    if success:
        print("""✔️ Both model do output the same tensors""" )
    else:
        print("""❌ Both model do **NOT** output the same tensors""" )
        print("""Absolute difference is:""" , lowerCamelCase_ )
if __name__ == "__main__":
    # CLI entry point: paths in, converted PyTorch checkpoint out.
    # NOTE(review): obfuscation collapsed ``parser``/``args`` into ``a`` —
    # the parser bound first is clobbered by the parsed namespace.
    a = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    a = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 169 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
# NOTE(review): the dump bound all three module constants below to
# ``__magic_name__`` (each assignment clobbering the previous one), so the
# conversion function's read of ``BertAbsConfig`` raised NameError.
# Restore the distinct names the rest of the script uses.
logger = logging.getLogger(__name__)

SAMPLE_TEXT = 'Hello world! cécé herlolip'

# Configuration record mirroring the original BertAbs training configuration:
# encoder/decoder layer counts, hidden sizes, head counts, FFN sizes, dropout.
BertAbsConfig = namedtuple(
    'BertAbsConfig',
    [
        'temp_dir',
        'large',
        'use_bert_emb',
        'finetune_bert',
        'encoder',
        'share_emb',
        'max_pos',
        'enc_layers',
        'enc_hidden_size',
        'enc_heads',
        'enc_ff_size',
        'enc_dropout',
        'dec_layers',
        'dec_hidden_size',
        'dec_heads',
        'dec_ff_size',
        'dec_dropout',
    ],
)
def __snake_case ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict ):
    """Convert an original BertAbs checkpoint into a ``BertAbsSummarizer`` state dict and save it.

    NOTE(review): an obfuscation pass renamed both parameters to
    ``lowerCamelCase_`` (duplicate arguments are a SyntaxError) and bound every
    local to ``__magic_name__``, so later reads such as ``original``,
    ``new_model``, ``tokenizer`` and ``encoder_input_ids`` are unresolved.
    Presumably the parameters were the checkpoint path and the output folder —
    restore the distinct names before this can run. Code is kept verbatim and
    only annotated.
    """
    __magic_name__ = BertAbsConfig(
        temp_dir="." , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    # NOTE(review): a lambda with two identical parameter names is also a
    # SyntaxError — presumably ``lambda storage, loc: storage`` (CPU map_location).
    __magic_name__ = torch.load(lowerCamelCase_ , lambda lowerCamelCase_ , lowerCamelCase_ : storage )
    __magic_name__ = AbsSummarizer(lowerCamelCase_ , torch.device("cpu" ) , lowerCamelCase_ )
    original.eval()
    __magic_name__ = BertAbsSummarizer(lowerCamelCase_ , torch.device("cpu" ) )
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model" )
    # The HF model mirrors the original module layout, so state dicts transfer 1:1.
    new_model.bert.load_state_dict(original.bert.state_dict() )
    new_model.decoder.load_state_dict(original.decoder.state_dict() )
    new_model.generator.load_state_dict(original.generator.state_dict() )
    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical" )
    __magic_name__ = BertTokenizer.from_pretrained("bert-base-uncased" )
    # prepare the model inputs
    __magic_name__ = tokenizer.encode("This is sample éàalj'-." )
    # Pad both sequences up to the 512-token maximum position.
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) )
    __magic_name__ = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
    __magic_name__ = tokenizer.encode("This is sample 3 éàalj'-." )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) )
    __magic_name__ = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
    # forward pass
    __magic_name__ = encoder_input_ids
    __magic_name__ = decoder_input_ids
    __magic_name__ = __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = __magic_name__ = None
    __magic_name__ = __magic_name__ = None
    __magic_name__ = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    __magic_name__ = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
    __magic_name__ = original.generator(lowerCamelCase_ )
    __magic_name__ = new_model(
        lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
    __magic_name__ = new_model.generator(lowerCamelCase_ )
    # Report the largest elementwise divergence between the two stacks.
    __magic_name__ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(lowerCamelCase_ ) )
    __magic_name__ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(lowerCamelCase_ ) )
    __magic_name__ = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
    if are_identical:
        logging.info("all weights are equal up to 1e-3" )
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one." )
    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary" )
    torch.save(
        new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
    # NOTE(review): the dump bound the parser and the parsed namespace to
    # ``__magic_name__`` while the rest of the block read ``parser``/``args``.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--bertabs_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch dump.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the output PyTorch model.',
    )
    args = parser.parse_args()
    # The conversion routine in this module is (obfuscated to) ``__snake_case``;
    # the original call target ``convert_bertabs_checkpoints`` does not exist
    # here and raised NameError.
    __snake_case(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 664 | 0 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
UpperCamelCase_ : List[str] = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
UpperCamelCase_ : List[str] = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
UpperCamelCase_ : str = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def UpperCamelCase ( _UpperCAmelCase : Any ) -> List[str]:
'''simple docstring'''
def remove_articles(_UpperCAmelCase : Dict ):
_lowercase : Union[str, Any] = re.compile(R"\b(a|an|the)\b" , re.UNICODE )
return re.sub(lowerCamelCase_ , " " , lowerCamelCase_ )
def white_space_fix(_UpperCAmelCase : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(_UpperCAmelCase : str ):
_lowercase : Any = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_UpperCAmelCase : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) )
def UpperCamelCase ( a_gold : str , a_pred : str ) -> int:
    """Return 1 if the normalized prediction equals the normalized gold answer, else 0.

    NOTE(review): the original signature declared both parameters as
    ``_UpperCAmelCase`` (duplicate arguments are a SyntaxError); positional
    order is preserved. ``normalize_answer`` is the intended helper — in this
    dump it is defined under an obfuscated name, so restore it file-wide.
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def UpperCamelCase ( predictions : list , references : list ) -> float:
    """Corpus exact-match percentage: a prediction scores if it matches ANY of its references.

    NOTE(review): the original signature declared both parameters as
    ``_UpperCAmelCase`` (duplicate arguments are a SyntaxError) and the body
    used undefined ``lowerCamelCase_`` placeholders. ``compute_exact`` is the
    intended per-pair helper (obfuscated in this dump).
    """
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def UpperCamelCase ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> Dict:
    """Score one n-gram order for SARI; returns (keep, delete-precision, add) scores.

    NOTE(review): obfuscation renamed all four parameters to ``_UpperCAmelCase``
    (duplicate arguments are a SyntaxError) and replaced every call argument
    with the undefined placeholder ``lowerCamelCase_``. Judging by the locals
    read below, the parameters were presumably ``sgrams`` (source n-grams),
    ``cgrams`` (candidate), ``rgramslist`` (per-reference lists) and ``numref``
    — restore them before this can run. Code kept verbatim, only annotated.
    """
    _lowercase : Dict = [rgram for rgrams in rgramslist for rgram in rgrams]
    _lowercase : List[str] = Counter(lowerCamelCase_ )
    _lowercase : Tuple = Counter(lowerCamelCase_ )
    _lowercase : int = Counter()
    # Weight each source n-gram count by the number of references.
    for sgram, scount in sgramcounter.items():
        _lowercase : Any = scount * numref
    _lowercase : List[Any] = Counter(lowerCamelCase_ )
    _lowercase : Dict = Counter()
    # Same weighting for the candidate n-grams.
    for cgram, ccount in cgramcounter.items():
        _lowercase : int = ccount * numref
    # KEEP
    _lowercase : Any = sgramcounter_rep & cgramcounter_rep
    _lowercase : Union[str, Any] = keepgramcounter_rep & rgramcounter
    _lowercase : List[Any] = sgramcounter_rep & rgramcounter
    _lowercase : Dict = 0
    _lowercase : Any = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscorea += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    _lowercase : Optional[int] = 1
    _lowercase : Tuple = 1
    if len(lowerCamelCase_ ) > 0:
        _lowercase : Dict = keeptmpscorea / len(lowerCamelCase_ )
    if len(lowerCamelCase_ ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        _lowercase : Union[str, Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
    _lowercase : int = 0
    # F1 of keep precision/recall (0 when both are 0).
    if keepscore_precision > 0 or keepscore_recall > 0:
        _lowercase : Optional[Any] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    _lowercase : List[Any] = sgramcounter_rep - cgramcounter_rep
    _lowercase : int = delgramcounter_rep - rgramcounter
    _lowercase : Optional[Any] = sgramcounter_rep - rgramcounter
    _lowercase : Union[str, Any] = 0
    _lowercase : Optional[int] = 0
    for delgram in delgramcountergood_rep:
        deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    _lowercase : Any = 1
    if len(lowerCamelCase_ ) > 0:
        _lowercase : List[Any] = deltmpscorea / len(lowerCamelCase_ )
    # ADDITION
    _lowercase : List[Any] = set(lowerCamelCase_ ) - set(lowerCamelCase_ )
    _lowercase : int = set(lowerCamelCase_ ) & set(lowerCamelCase_ )
    _lowercase : Optional[int] = set(lowerCamelCase_ ) - set(lowerCamelCase_ )
    _lowercase : str = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    _lowercase : Union[str, Any] = 1
    _lowercase : Optional[Any] = 1
    if len(lowerCamelCase_ ) > 0:
        _lowercase : int = addtmpscore / len(lowerCamelCase_ )
    if len(lowerCamelCase_ ) > 0:
        _lowercase : Tuple = addtmpscore / len(lowerCamelCase_ )
    _lowercase : Any = 0
    # F1 of add precision/recall (0 when both are 0).
    if addscore_precision > 0 or addscore_recall > 0:
        _lowercase : Optional[Any] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def UpperCamelCase ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ) -> List[Any]:
    """Sentence-level SARI: average keep/delete/add scores over 1- to 4-gram orders.

    NOTE(review): obfuscation renamed all three parameters to
    ``_UpperCAmelCase`` (duplicate arguments are a SyntaxError); the locals
    read below suggest they were ``ssent`` (source), ``csent`` (candidate) and
    ``rsents`` (references). The ``lowerCamelCase_`` placeholders are
    undefined and must be restored before this can run. Code kept verbatim.
    """
    _lowercase : Tuple = len(lowerCamelCase_ )
    _lowercase : str = ssent.split(" " )
    _lowercase : Tuple = csent.split(" " )
    # Buckets for source / candidate / reference n-grams of orders 1..4.
    _lowercase : List[str] = []
    _lowercase : Any = []
    _lowercase : Optional[int] = []
    _lowercase : List[Any] = []
    _lowercase : str = []
    _lowercase : Optional[Any] = []
    _lowercase : List[Any] = []
    _lowercase : str = []
    _lowercase : Optional[Any] = []
    _lowercase : str = []
    for rsent in rsents:
        _lowercase : List[str] = rsent.split(" " )
        _lowercase : Tuple = []
        _lowercase : Dict = []
        _lowercase : Union[str, Any] = []
        ragramslist.append(lowerCamelCase_ )
        # Build reference 2-/3-/4-grams by joining consecutive unigrams.
        for i in range(0 , len(lowerCamelCase_ ) - 1 ):
            if i < len(lowerCamelCase_ ) - 1:
                _lowercase : Optional[int] = ragrams[i] + " " + ragrams[i + 1]
                ragrams.append(lowerCamelCase_ )
            if i < len(lowerCamelCase_ ) - 2:
                _lowercase : Tuple = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2]
                ragrams.append(lowerCamelCase_ )
            if i < len(lowerCamelCase_ ) - 3:
                _lowercase : Dict = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3]
                ragrams.append(lowerCamelCase_ )
        ragramslist.append(lowerCamelCase_ )
        ragramslist.append(lowerCamelCase_ )
        ragramslist.append(lowerCamelCase_ )
    # Build source 2-/3-/4-grams.
    for i in range(0 , len(lowerCamelCase_ ) - 1 ):
        if i < len(lowerCamelCase_ ) - 1:
            _lowercase : Optional[int] = sagrams[i] + " " + sagrams[i + 1]
            sagrams.append(lowerCamelCase_ )
        if i < len(lowerCamelCase_ ) - 2:
            _lowercase : int = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2]
            sagrams.append(lowerCamelCase_ )
        if i < len(lowerCamelCase_ ) - 3:
            _lowercase : Dict = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3]
            sagrams.append(lowerCamelCase_ )
    # Build candidate 2-/3-/4-grams.
    for i in range(0 , len(lowerCamelCase_ ) - 1 ):
        if i < len(lowerCamelCase_ ) - 1:
            _lowercase : Optional[int] = cagrams[i] + " " + cagrams[i + 1]
            cagrams.append(lowerCamelCase_ )
        if i < len(lowerCamelCase_ ) - 2:
            _lowercase : List[str] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2]
            cagrams.append(lowerCamelCase_ )
        if i < len(lowerCamelCase_ ) - 3:
            _lowercase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3]
            cagrams.append(lowerCamelCase_ )
    # One SARIngram call per n-gram order (1..4).
    # NOTE(review): annotating a tuple-unpacking target is itself a SyntaxError.
    ((_lowercase) , (_lowercase) , (_lowercase)) : Dict = SARIngram(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    ((_lowercase) , (_lowercase) , (_lowercase)) : Optional[int] = SARIngram(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    ((_lowercase) , (_lowercase) , (_lowercase)) : Union[str, Any] = SARIngram(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    ((_lowercase) , (_lowercase) , (_lowercase)) : Tuple = SARIngram(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    # Average each component over the four n-gram orders, then average the three.
    _lowercase : List[str] = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
    _lowercase : Any = sum([delascore, delascore, delascore, delascore] ) / 4
    _lowercase : str = sum([addascore, addascore, addascore, addascore] ) / 4
    _lowercase : Tuple = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def UpperCamelCase ( sentence , lowercase = True , tokenizer = "13a" , return_str = True ):
    """Normalize ``sentence`` for metric computation.

    NOTE(review): the original signature declared all four parameters as
    ``_UpperCAmelCase`` (duplicate arguments are a SyntaxError); the names
    used in the body (``sentence``, ``lowercase``, ``tokenizer``,
    ``return_str``) are restored, preserving positional order and defaults.

    Args:
        sentence: the raw sentence to normalize.
        lowercase: whether to lowercase before tokenizing.
        tokenizer: "13a"/"intl" (sacrebleu), "moses", "penn", or anything
            else to skip tokenization.
        return_str: return a joined string when True, a token list otherwise.
    """
    # Normalization is requiring the pre-tokenized input to be lowercased first.
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        # sacrebleu >= 2 moved the tokenizer registry under metrics.bleu.
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        # Unknown tokenizer name: pass the sentence through untouched.
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def UpperCamelCase ( sources , predictions , references ):
    """Corpus-level SARI: mean sentence SARI over (source, prediction, references) triples, scaled to 0-100.

    NOTE(review): the original signature declared all three parameters as
    ``_UpperCAmelCase`` (duplicate arguments are a SyntaxError); positional
    order is preserved. ``SARIsent``/``normalize`` are the intended helper
    names — in this dump they exist only under obfuscated identifiers.

    Raises:
        ValueError: if the three input lists have different lengths.
    """
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def UpperCamelCase ( predictions , references , smooth_method = "exp" , smooth_value = None , force = False , lowercase = False , use_effective_order = False , ):
    """Corpus BLEU via sacrebleu; references are transposed to sacrebleu's per-position layout.

    NOTE(review): the original signature declared every parameter as
    ``_UpperCAmelCase`` (duplicate arguments are a SyntaxError); names are
    restored from the keyword arguments the body forwards, preserving the
    positional order and the defaults ("exp", None, False, False, False).

    Raises:
        ValueError: if predictions do not all have the same number of references.
    """
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    # sacrebleu expects one list per reference position, not one per prediction.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
    """``datasets.Metric`` exposing the combined wiki_split metric (SARI + sacreBLEU + exact match).

    NOTE(review): both methods below are (obfuscated to) ``_a``, so the second
    definition shadows the first — upstream these are presumably ``_info`` and
    ``_compute``. The decorator also reads ``_DESCRIPTION``/``_KWARGS_DESCRIPTION``,
    which in this dump are bound to a different obfuscated name.
    """

    def _a(self : Optional[int] ) -> str:
        # Metric metadata: description, citation, input feature schema and URLs.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
                } ) , codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ] , reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ] , )

    def _a(self : List[str] , snake_case : Dict , snake_case : int , snake_case : List[str] ) -> List[str]:
        # NOTE(review): three parameters all named ``snake_case`` (a SyntaxError);
        # presumably (sources, predictions, references). The ``_lowerCamelCase``
        # placeholders below are likewise undefined and must be restored.
        _lowercase : int = {}
        result.update({"sari": compute_sari(sources=_lowerCamelCase , predictions=_lowerCamelCase , references=_lowerCamelCase )} )
        result.update({"sacrebleu": compute_sacrebleu(predictions=_lowerCamelCase , references=_lowerCamelCase )} )
        result.update({"exact": compute_em(predictions=_lowerCamelCase , references=_lowerCamelCase )} )
        return result
| 461 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
    """Unit tests for ``DisjunctiveConstraint``.

    NOTE(review): the obfuscated dump named every test method ``__A`` (each
    definition shadowing the previous one) and referenced the undefined
    placeholders ``_lowerCamelCase`` / ``__magic_name__``; distinct method
    names and locals are restored here so unittest can discover and run them.
    """

    def test_input_types(self ) -> None:
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )

        # Tensors (or lists of tensors) must be rejected at construction time.
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )

        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def test_check_illegal_input(self ) -> None:
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here

    def test_example_progression(self ) -> None:
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )

        stepped, completed, reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        stepped, completed, reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped, completed, reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def test_example_progression_unequal_three_mid_and_reset(self ) -> None:
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )

        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped, completed, reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )

        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )

        # After reset, progress restarts from an empty sequence.
        dc.reset()

        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )

        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 664 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def a(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """Convert a TensorFlow ALBERT checkpoint to a PyTorch model and save its state dict.

    NOTE(review): the original signature was ``def a(a, a, a)`` — duplicate
    parameters are a SyntaxError and the body read undefined placeholders.
    Parameter names are restored from the CLI flags this script parses,
    preserving positional order.
    """
    # Initialise the PyTorch model from the JSON architecture description.
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # NOTE(review): the dump bound the parser and namespace to
    # ``__lowerCAmelCase`` while the block read ``parser``/``args``, called an
    # undefined ``convert_tf_checkpoint_to_pytorch`` (the module's function is
    # obfuscated to ``a``), and had stray text fused onto the final line.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--albert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained ALBERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    a(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
# Non-default value for every common ``PretrainedConfig`` kwarg; the
# ConfigTestUtils class below asserts each differs from the class default.
# NOTE(review): that class reads this dict as ``config_common_kwargs`` — the
# dump bound it to ``__magic_name__``, which made the name unresolved.
config_common_kwargs = {
    'return_dict': False,
    'output_hidden_states': True,
    'output_attentions': True,
    'torchscript': True,
    'torch_dtype': 'float16',
    'use_bfloat16': True,
    'tf_legacy_loss': True,
    'pruned_heads': {'a': 1},
    'tie_word_embeddings': False,
    'is_decoder': True,
    'cross_attention_hidden_size': 128,
    'add_cross_attention': True,
    'tie_encoder_decoder': True,
    'max_length': 50,
    'min_length': 3,
    'do_sample': True,
    'early_stopping': True,
    'num_beams': 3,
    'num_beam_groups': 3,
    'diversity_penalty': 0.5,
    'temperature': 2.0,
    'top_k': 10,
    'top_p': 0.7,
    'typical_p': 0.2,
    'repetition_penalty': 0.8,
    'length_penalty': 0.8,
    'no_repeat_ngram_size': 5,
    'encoder_no_repeat_ngram_size': 5,
    'bad_words_ids': [1, 2, 3],
    'num_return_sequences': 3,
    'chunk_size_feed_forward': 5,
    'output_scores': True,
    'return_dict_in_generate': True,
    'forced_bos_token_id': 2,
    'forced_eos_token_id': 3,
    'remove_invalid_values': True,
    'architectures': ['BertModel'],
    'finetuning_task': 'translation',
    'id2label': {0: 'label'},
    'label2id': {'label': '0'},
    'tokenizer_class': 'BertTokenizerFast',
    'prefix': 'prefix',
    'bos_token_id': 6,
    'pad_token_id': 7,
    'eos_token_id': 8,
    'sep_token_id': 9,
    'decoder_start_token_id': 10,
    'exponential_decay_length_penalty': (5, 1.01),
    'suppress_tokens': [0, 1],
    'begin_suppress_tokens': 2,
    'task_specific_params': {'translation': 'some_params'},
    'problem_type': 'regression',
}
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
    """Staging-hub tests for pushing configs to the HuggingFace Hub.

    NOTE(review): every method is (obfuscated to) ``__A``, so the later
    definitions shadow the earlier ones — presumably these were
    ``setUpClass`` / ``tearDownClass`` / ``test_push_to_hub`` /
    ``test_push_to_hub_in_organization`` / ``test_push_to_hub_dynamic_config``.
    Locals are bound to ``__magic_name__`` and the ``_lowerCamelCase``
    placeholder is undefined; restore distinct names before running.
    """

    @classmethod
    def __A ( cls : Any ) -> Union[str, Any]:
        # Authenticate against the staging endpoint for the whole class.
        __magic_name__ = TOKEN
        HfFolder.save_token(_lowerCamelCase )

    @classmethod
    def __A ( cls : Any ) -> Tuple:
        # Best-effort cleanup of repos possibly created by the tests below.
        try:
            delete_repo(token=cls._token , repo_id="test-config" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-config" )
        except HTTPError:
            pass

    def __A ( self : Optional[Any] ) -> Dict:
        # Push a config to a user repo, then reload and compare all fields.
        __magic_name__ = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("test-config" , use_auth_token=self._token )
        __magic_name__ = BertConfig.from_pretrained(f'{USER}/test-config' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-config" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(_lowerCamelCase , repo_id="test-config" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
        __magic_name__ = BertConfig.from_pretrained(f'{USER}/test-config' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )

    def __A ( self : str ) -> Optional[int]:
        # Same round-trip but into an organization-owned repo.
        __magic_name__ = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
        __magic_name__ = BertConfig.from_pretrained("valid_org/test-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                _lowerCamelCase , repo_id="valid_org/test-config-org" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
        __magic_name__ = BertConfig.from_pretrained("valid_org/test-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )

    def __A ( self : Optional[int] ) -> Union[str, Any]:
        # Push a custom (dynamic-module) config and reload it via AutoConfig.
        CustomConfig.register_for_auto_class()
        __magic_name__ = CustomConfig(attribute=42 )
        config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
        __magic_name__ = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config' , trust_remote_code=_lowerCamelCase )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
        self.assertEqual(new_config.attribute , 42 )
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Optional[int] ) -> Optional[Any]:
__magic_name__ = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__magic_name__ = c.n_embd + 1 # int
__magic_name__ = c.resid_pdrop + 1.0 # float
__magic_name__ = not c.scale_attn_weights # bool
__magic_name__ = c.summary_type + "foo" # str
c.update_from_string(
f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
self.assertEqual(_lowerCamelCase , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(_lowerCamelCase , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(_lowerCamelCase , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(_lowerCamelCase , c.summary_type , "mismatch for key: summary_type" )
def __A ( self : List[Any] ) -> Union[str, Any]:
__magic_name__ = PretrainedConfig()
__magic_name__ = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
_lowerCamelCase , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
__magic_name__ = [key for key, value in config_common_kwargs.items() if value == getattr(_lowerCamelCase , _lowerCamelCase )]
if len(_lowerCamelCase ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f' {", ".join(_lowerCamelCase )}.' )
def __A ( self : List[Any] ) -> List[Any]:
with self.assertRaises(_lowerCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(_lowerCamelCase )
def __A ( self : Tuple ) -> int:
# A mock response for an HTTP head request to emulate server down
__magic_name__ = mock.Mock()
__magic_name__ = 5_00
__magic_name__ = {}
__magic_name__ = HTTPError
__magic_name__ = {}
# Download this model to make sure it's in the cache.
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=_lowerCamelCase ) as mock_head:
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
def __A ( self : Union[str, Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
__magic_name__ = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def test_local_versioning(self) -> None:
    """Version-suffixed config files (`config.X.Y.Z.json`) are picked based on the installed Transformers version."""
    configuration = AutoConfig.from_pretrained("bert-base-cased")
    configuration.configuration_files = ["config.4.0.0.json"]

    with tempfile.TemporaryDirectory() as tmp_dir:
        configuration.save_pretrained(tmp_dir)
        configuration.hidden_size = 2
        json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

        # This should pick the new configuration file as the version of Transformers is > 4.0.0
        new_configuration = AutoConfig.from_pretrained(tmp_dir)
        self.assertEqual(new_configuration.hidden_size, 2)

        # Will need to be adjusted if we reach v42 and this test is still here.
        # Should pick the old configuration file as the version of Transformers is < 4.42.0
        configuration.configuration_files = ["config.42.0.0.json"]
        configuration.hidden_size = 768
        configuration.save_pretrained(tmp_dir)
        shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
        new_configuration = AutoConfig.from_pretrained(tmp_dir)
        self.assertEqual(new_configuration.hidden_size, 768)
def test_repo_versioning_before(self) -> None:
    """A repo with two versioned config files resolves to the right one when the library version is monkey-patched."""
    # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
    repo = "hf-internal-testing/test-two-configs"

    import transformers as new_transformers

    new_transformers.configuration_utils.__version__ = "v4.0.0"
    new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
        repo, return_unused_kwargs=True)
    self.assertEqual(new_configuration.hidden_size, 2)
    # This checks `_configuration_file` ia not kept in the kwargs by mistake.
    self.assertDictEqual(kwargs, {})

    # Testing an older version by monkey-patching the version in the module it's used.
    import transformers as old_transformers

    old_transformers.configuration_utils.__version__ = "v3.0.0"
    old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
    self.assertEqual(old_configuration.hidden_size, 768)
| 664 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    """A minimal dense matrix supporting +, -, *, transpose and the Sherman-Morrison update."""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a `row` x `column` matrix filled with `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        """Render the matrix with right-aligned, uniformly padded elements."""
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier: width of the widest element.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """Return True iff `loc` is a 2-element (row, column) pair inside the matrix bounds."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        """Element access: `m[r, c]`."""
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        """Element assignment: `m[r, c] = value`."""
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        """Element-wise addition of two same-shaped matrices."""
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        """Element-wise negation."""
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        """Scalar multiplication (int/float) or matrix multiplication (Matrix)."""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        """Return a new matrix that is the transpose of this one."""
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """
        Apply the Sherman-Morrison formula: given this matrix as A^(-1) and column
        vectors u, v, return (A + u v^T)^(-1), or None when the update is singular.
        """
        # u, v should be column vectors
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row
        assert u.column == v.column == 1

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Exercise Matrix arithmetic and the Sherman-Morrison update on a small example."""
        # a^(-1): start from the 3x3 identity matrix.
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v: column vectors for the rank-one update.
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """Run the module doctests."""
        import doctest

        doctest.testmod()

    test2()
| 50 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    """Holds sizing/normalization parameters and builds the kwargs dict for the DPT image-processor tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # Default target size when the caller does not provide one.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests DPTImageProcessor: attribute presence, dict round-trip, and PIL/numpy/torch call paths."""

    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 664 | 0 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
# Module-level state for the library logging helpers below.
_lock = threading.Lock()
# Handler installed on the library root logger by `_configure_library_root_logger`.
_default_handler: Optional[logging.Handler] = None

# Mapping accepted by the TRANSFORMERS_VERBOSITY env var / `set_verbosity`.
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

# Whether tqdm progress bars are currently enabled.
_tqdm_active = True
def _get_default_logging_level():
    """
    If TRANSFORMERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it
    is not - fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }")
    return _default_log_level
def _get_library_name() -> str:
    """Return the top-level package name this module lives in."""
    return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
    """Return the root logger for this library's package."""
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    """Install the default stderr handler on the library root logger exactly once."""
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    """Remove the default handler and reset the library root logger's level."""
    global _default_handler

    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    """Return the mapping of verbosity names to logging levels."""
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """
    Return a logger with the specified name. Defaults to the library root logger's
    package when `name` is omitted.
    """
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the library's root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level for the library's root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """Disable the default handler of the library root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library root logger."""
    _configure_library_root_logger()

    # Fixed inverted check: a removable handler must currently BE registered
    # (the previous `not in` made this assertion fail for every valid handler).
    assert handler is not None and handler in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs. Note that log propagation is disabled by default."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Apply an explicit `[LEVEL|file:line] time >> message` format to every root-logger handler."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset the formatting of every root-logger handler back to the default."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, but the warning is suppressed entirely when the
    TRANSFORMERS_NO_ADVISORY_WARNINGS environment variable is set to a truthy value.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


# Expose the helper as a method on every `logging.Logger` instance.
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, but emits a warning with the same message only once
    per process (deduplicated via an unbounded `lru_cache` on the call arguments).
    """
    self.warning(*args, **kwargs)


# Expose the helper as a method on every `logging.Logger` instance.
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm replacement that does nothing: iterates, absorbs method calls, supports `with`."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        # tqdm's first positional argument is the iterable being wrapped.
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any tqdm method (update, set_description, ...)."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    """Callable facade that dispatches to real tqdm when progress bars are active, else to EmptyTqdm."""

    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
# Public tqdm entry point used throughout the library.
tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    """Disable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 120 |
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    """Fully-connected network with two hidden layers (4 and 3 nodes) trained by plain backpropagation."""

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        """
        :param input_array: training inputs, one sample per row.
        :param output_array: expected outputs, one (single-value) row per sample.
        """
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4)

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate the inputs forward through both hidden layers and return the output activations."""
        # layer_between_input_and_first_hidden_layer is the layer connecting the
        # input nodes with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights))

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """Update all three weight matrices from the error between real and predicted outputs."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """
        Run `iterations` rounds of feedforward + backpropagation.
        :param give_loss: when True, print the mean-squared loss each iteration.
        """
        for iteration in range(1, iterations + 1):
            # NOTE(review): predicted_output must be refreshed every round so
            # back_propagation works on the current activations.
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Run one sample through the trained network; returns 1 when the output exceeds 0.6, else 0."""
        # Input values for which the predictions are to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights))

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))

        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Apply the logistic sigmoid element-wise: 1 / (1 + e^(-value))."""
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of the sigmoid OUTPUT: s * (1 - s)."""
    return (value) * (1 - (value))
def example() -> int:
    """
    Train the two-hidden-layer network on a fixed 3-bit truth table for a few
    iterations, then return its prediction for the input [1, 1, 1].
    """
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
| 664 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    """Builds small ViT-MSN configs/inputs and shape-checks model outputs for the test suite."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print("Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline model tests for ViT-MSN."""

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats test fixture used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check of a pretrained ViT-MSN classification head on a real image."""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        # The classification head of vit-msn-small is randomly initialized at load, so seed for determinism.
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """Few-shot NER model: BERT encoder plus similarity-based start/end token scoring."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        """Encode tokenized inputs and return the last hidden state."""
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        """Sum token embeddings along the hidden dimension (keeping it for broadcasting)."""
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        """Temperature-scaled softmax over cosine similarity between query and support representations."""
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """
        Score every query token as an entity start/end using the marked start/end
        tokens of the per-query support examples.

        :param W_query: tokenized queries (BERT input dict).
        :param W_supports: tokenized support examples, plus `sizes`,
            `start_token_id` and `end_token_id` bookkeeping tensors (consumed here).
        :return: (p_starts, p_ends) stacked per-query probability vectors.
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        # These keys are bookkeeping, not BERT inputs.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            # Embeddings of the marked start/end tokens within this query's support slice.
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 664 | 0 |
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output of a scheduler `step()` call.

    Class and field names restored from their uses below: `step` returns
    `DDIMSchedulerOutput(prev_sample=..., pred_original_sample=...)`.
    NOTE(review): base class presumed to be the imported `BaseOutput` (the
    mangled original inherited an undefined name) — confirm against upstream.
    """

    # sample propagated one step (x_{t+1} in the inverted process)
    prev_sample: torch.FloatTensor
    # model's predicted denoised sample ("x_0"), when available
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Discretise an alpha-bar function into a beta schedule.

    Renamed from a mangled single-letter identifier: the scheduler below calls
    `betas_for_alpha_bar(...)`. The original signature repeated one parameter
    name three times (a syntax error); names restored from the body's reads.

    :param num_diffusion_timesteps: number of betas to produce.
    :param max_beta: clamp for each beta (upstream default 0.999 — confirm).
    :param alpha_transform_type: "cosine" or "exp" alpha-bar shape.
    :returns: 1-D float32 tensor of betas.
    :raises ValueError: for an unknown `alpha_transform_type`.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        # message text (incl. the upstream "tranform" typo) kept byte-for-byte
        raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''')

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    # `torch.floataa` in the mangled original is garbled float32
    return torch.tensor(betas, dtype=torch.float32)
# NOTE(review): identifiers in this class were mangled by an automated rewrite —
# assignments bind throwaway names (`lowerCamelCase`) while later lines read the
# intended attributes (`self.betas`, `self.alphas_cumprod`, `beta_prod_t`, ...),
# and several call sites pass undefined names (`_lowerCamelCase`). Code is kept
# byte-for-byte; only comments/docstrings were added.
class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ ):
    """Inverted-DDIM-style scheduler: steps the diffusion process forward
    (t -> t+1), e.g. for image inversion. Mirrors diffusers'
    DDIMInverseScheduler — confirm against upstream; the mangled base-class
    names cannot establish the exact lineage from here."""

    # presumably the scheduler `order` (solver order) upstream — TODO confirm
    lowerCamelCase_ : Dict = 1

    @register_to_config
    def __init__( self , UpperCamelCase__ = 1000 , UpperCamelCase__ = 0.0001 , UpperCamelCase__ = 0.02 , UpperCamelCase__ = "linear" , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = True , UpperCamelCase__ = 0 , UpperCamelCase__ = "epsilon" , UpperCamelCase__ = 1.0 , **UpperCamelCase__ , ) -> str:
        # Backwards compatibility: `set_alpha_to_one` was renamed `set_alpha_to_zero`.
        if kwargs.get("set_alpha_to_one" , _lowerCamelCase ) is not None:
            lowerCamelCase : Tuple = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one" , "1.0.0" , _lowerCamelCase , standard_warn=_lowerCamelCase )
            lowerCamelCase : str = kwargs["set_alpha_to_one"]
        # Select the beta (noise-variance) schedule.
        if trained_betas is not None:
            lowerCamelCase : Optional[int] = torch.tensor(_lowerCamelCase , dtype=torch.floataa )
        elif beta_schedule == "linear":
            lowerCamelCase : Any = torch.linspace(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            lowerCamelCase : List[Any] = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCamelCase , dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            lowerCamelCase : int = betas_for_alpha_bar(_lowerCamelCase )
        else:
            raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' )
        lowerCamelCase : Any = 1.0 - self.betas
        lowerCamelCase : int = torch.cumprod(self.alphas , dim=0 )
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        lowerCamelCase : Optional[Any] = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        lowerCamelCase : Any = 1.0
        # setable values
        lowerCamelCase : List[Any] = None
        # `np.intaa` looks like garbled int64 — TODO confirm against upstream
        lowerCamelCase : Optional[int] = torch.from_numpy(np.arange(0 , _lowerCamelCase ).copy().astype(np.intaa ) )

    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> torch.FloatTensor:
        # scale_model_input: this scheduler needs no input scaling — identity.
        return sample

    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Optional[int]:
        """Set the discrete timestep grid used for the denoising/inversion loop."""
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                F''' maximal {self.config.num_train_timesteps} timesteps.''' )
        lowerCamelCase : Optional[Any] = num_inference_steps
        lowerCamelCase : List[str] = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        lowerCamelCase : Dict = (np.arange(0 , _lowerCamelCase ) * step_ratio).round().copy().astype(np.intaa )
        lowerCamelCase : str = torch.from_numpy(_lowerCamelCase ).to(_lowerCamelCase )
        self.timesteps += self.config.steps_offset

    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0.0 , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
        """One inverted DDIM step: compute the sample at t+1 from the model
        output at t (formula (12) of the DDIM paper, run in reverse)."""
        # 1. get previous step value (=t+1)
        lowerCamelCase : List[str] = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        lowerCamelCase : int = self.alphas_cumprod[timestep]
        lowerCamelCase : Dict = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        lowerCamelCase : int = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            lowerCamelCase : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            lowerCamelCase : Any = model_output
        elif self.config.prediction_type == "sample":
            lowerCamelCase : Optional[int] = model_output
            lowerCamelCase : Optional[int] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            lowerCamelCase : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            lowerCamelCase : Union[str, Any] = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                " `v_prediction`" )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            lowerCamelCase : int = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        lowerCamelCase : List[str] = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        lowerCamelCase : Union[str, Any] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=_lowerCamelCase , pred_original_sample=_lowerCamelCase )

    def __len__( self ) -> Any:
        # Length of the scheduler == number of training timesteps.
        return self.config.num_train_timesteps
| 311 |
'''Deprecated re-export module for `FlaxStableDiffusionControlNetPipeline`.'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401

# Emit a deprecation warning at import time, pointing users at the new import path.
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
| 664 | 0 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
# Names restored from their reads: `iterate(INITIAL_VECTORS, 5)` in the main
# guard and the VECTOR_* references on the list line; the mangled original
# bound all four values to one throwaway name.
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])  # ~(1/2, sqrt(3)/2): apex of the unit triangle
VECTOR_3 = numpy.array([1, 0])
# closed outline of the triangle (first vertex repeated at the end)
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors, steps):
    """Apply `iteration_step` *steps* times to the vector outline.

    Renamed from a mangled identifier: the main guard calls
    `iterate(INITIAL_VECTORS, 5)`. The original signature repeated one
    parameter name twice (a syntax error).
    """
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors):
    """One Koch-snowflake refinement: replace each segment by four segments.

    For every consecutive pair (start, end) the middle third is replaced by the
    two sides of an outward equilateral bump (a 60-degree rotation of the third).
    Renamed from a mangled identifier — `iterate` calls `iteration_step(...)`.
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector, angle_in_degrees):
    """Rotate a 2-D vector counter-clockwise by the given angle (degrees).

    Renamed from a mangled identifier — `iteration_step` calls `rotate(...)`.
    """
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors):
    """Draw the snowflake outline with matplotlib and show it.

    Renamed from a mangled identifier — the main guard calls `plot(...)`.
    """
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the result is bound to `a_` but `plot` reads
    # `processed_vectors` — the binding name looks mangled; presumably
    # `processed_vectors = iterate(...)` upstream. Code kept byte-for-byte.
    a_ : Tuple = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def __snake_case ( lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ):
    """Convert a T5X (JAX) checkpoint into a Flax transformers T5/LongT5 model.

    NOTE(review): identifiers were mangled by an automated rewrite — the three
    parameters share one name (a syntax error as written) and every assignment
    binds `__magic_name__` while later lines read the intended names (`config`,
    `tax_model`, `flax_model`, the per-layer tensors, ...). The original
    assignment targets (Flax param-dict keys) cannot be recovered from this
    view, so the code is kept byte-for-byte; only comments/docstrings added.
    Confirm against the upstream convert_t5x_checkpoint_to_flax script.
    """
    __magic_name__ = AutoConfig.from_pretrained(lowerCamelCase_ )
    __magic_name__ = FlaxAutoModelForSeqaSeqLM.from_config(config=lowerCamelCase_ )
    __magic_name__ = checkpoints.load_tax_checkpoint(lowerCamelCase_ )
    __magic_name__ = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
    # NOTE(review): for model_type "t5" the chain below still falls through to
    # the `else` and raises — the second `if` presumably should be `elif`; verify.
    if config.model_type == "t5":
        __magic_name__ = "SelfAttention"
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        __magic_name__ = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        __magic_name__ = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]." )
    # Encoder
    for layer_index in range(config.num_layers ):
        __magic_name__ = F'layers_{str(lowerCamelCase_ )}'
        # Self-Attention
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
        # Layer Normalization
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
        if split_mlp_wi:
            __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        __magic_name__ = flax_model.params["encoder"]["block"][str(lowerCamelCase_ )]["layer"]
        __magic_name__ = tax_attention_key
        __magic_name__ = tax_attention_out
        __magic_name__ = tax_attention_query
        __magic_name__ = tax_attention_value
        __magic_name__ = tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            __magic_name__ = tax_global_layer_norm
        if split_mlp_wi:
            __magic_name__ = tax_mlp_wi_a
            __magic_name__ = tax_mlp_wi_a
        else:
            __magic_name__ = tax_mlp_wi
        __magic_name__ = tax_mlp_wo
        __magic_name__ = tax_mlp_layer_norm
        __magic_name__ = flax_model_encoder_layer_block
    # Only for layer 0:
    __magic_name__ = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    __magic_name__ = tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        __magic_name__ = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        __magic_name__ = tax_encoder_global_rel_embedding
    # Assigning
    __magic_name__ = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    __magic_name__ = tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers ):
        __magic_name__ = F'layers_{str(lowerCamelCase_ )}'
        # Self-Attention
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
        # Layer Normalization
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]
        # Encoder-Decoder-Attention
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        __magic_name__ = tax_enc_dec_attention_module["key"]["kernel"]
        __magic_name__ = tax_enc_dec_attention_module["out"]["kernel"]
        __magic_name__ = tax_enc_dec_attention_module["query"]["kernel"]
        __magic_name__ = tax_enc_dec_attention_module["value"]["kernel"]
        # Layer Normalization
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
        # MLP
        if split_mlp_wi:
            __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        __magic_name__ = flax_model.params["decoder"]["block"][str(lowerCamelCase_ )]["layer"]
        __magic_name__ = tax_attention_key
        __magic_name__ = tax_attention_out
        __magic_name__ = tax_attention_query
        __magic_name__ = tax_attention_value
        __magic_name__ = tax_pre_attention_layer_norm
        __magic_name__ = tax_enc_dec_attention_key
        __magic_name__ = tax_enc_dec_attention_out
        __magic_name__ = tax_enc_dec_attention_query
        __magic_name__ = tax_enc_dec_attention_value
        __magic_name__ = tax_cross_layer_norm
        if split_mlp_wi:
            __magic_name__ = tax_mlp_wi_a
            __magic_name__ = tax_mlp_wi_a
        else:
            __magic_name__ = tax_mlp_wi
        __magic_name__ = tax_mlp_wo
        # NOTE(review): `txa_mlp_layer_norm` vs `tax_mlp_layer_norm` — prefix
        # inconsistency, presumably the same tensor; verify against upstream.
        __magic_name__ = txa_mlp_layer_norm
        __magic_name__ = flax_model_decoder_layer_block
    # Decoder Normalization
    __magic_name__ = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    __magic_name__ = txa_decoder_norm
    # Only for layer 0:
    __magic_name__ = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    __magic_name__ = tax_decoder_rel_embedding
    # Token Embeddings
    __magic_name__ = tax_model["target"]["token_embedder"]["embedding"]
    __magic_name__ = txa_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        __magic_name__ = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(lowerCamelCase_ )
    print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
    # NOTE(review): the parser and parsed args are bound to `__magic_name__`
    # but read back as `parser` / `args` — identifiers look mangled; code kept
    # byte-for-byte.
    __magic_name__ : Optional[Any] =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
    )
    parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
    parser.add_argument(
        '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
    )
    __magic_name__ : Optional[int] =parser.parse_args()
    convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 664 | 0 |
from __future__ import annotations
import queue
class SCREAMING_SNAKE_CASE__:
    """A binary-tree node holding a value and optional left/right children.

    The mangled original bound all three attributes to a throwaway name; the
    intended attributes (`data`, `left`, `right`) are read throughout the
    traversal functions below.
    """

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
def build_tree():
    """Interactively build a binary tree breadth-first from stdin.

    Renamed from a mangled identifier — the main guard calls `build_tree()`.
    Entering "n" (or nothing) for a child stops input and returns the root.
    The mangled original referenced an undefined `TreeNode`; the node class in
    this module is `SCREAMING_SNAKE_CASE__`.
    """
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q = queue.Queue()
    tree_node = SCREAMING_SNAKE_CASE__(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"""Enter the left node of {node_found.data}: """
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = SCREAMING_SNAKE_CASE__(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"""Enter the right node of {node_found.data}: """
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = SCREAMING_SNAKE_CASE__(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable in practice: kept from the original (loop always returns)
def pre_order(node):
    """Print a pre-order (root, left, right) traversal, comma-separated.

    Renamed from a mangled identifier — the recursive calls and the main guard
    use `pre_order`. Non-node or falsy input is silently ignored.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)
def in_order(node):
    """Print an in-order (left, root, right) traversal, comma-separated.

    Renamed from a mangled identifier — the recursive calls and the main guard
    use `in_order`.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)
def post_order(node):
    """Print a post-order (left, right, root) traversal, comma-separated.

    Renamed from a mangled identifier — the recursive calls and the main guard
    use `post_order`.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node):
    """Print a breadth-first (level-order) traversal, comma-separated.

    Renamed from a mangled identifier — the main guard calls `level_order`.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__) or not node:
        return
    q = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node):
    """Print a level-order traversal with a newline after each level.

    Renamed from a mangled identifier — the main guard calls
    `level_order_actual`. Each level is drained fully before its children
    (collected in `list_`) are queued for the next round.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__) or not node:
        return
    q = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node):
    """Iterative pre-order traversal using an explicit stack, comma-separated.

    Renamed from a mangled identifier — the main guard calls `pre_order_iter`.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node):
    """Iterative in-order traversal using an explicit stack, comma-separated.

    Renamed from a mangled identifier — the main guard calls `in_order_iter`.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node):
    """Iterative post-order traversal using two stacks, comma-separated.

    Renamed from a mangled identifier — the main guard calls `post_order_iter`.
    Stack 2 accumulates nodes in reverse post-order; popping it yields the
    post-order sequence.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
def SCREAMING_SNAKE_CASE ( snake_case_ : str = "" , snake_case_ : Optional[int]=50 , snake_case_ : Union[str, Any]="*" ):
if not s:
return "\n" + width * char
snake_case__, snake_case__ : Tuple = divmod(width - len(lowerCamelCase_ ) - 2 , 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("""Binary Tree Traversals"""))
    # NOTE(review): the tree is bound to `__lowerCamelCase` (name-mangled in a
    # class context; here just an odd module name) while every call below reads
    # `node` — presumably `node = build_tree()` upstream. The `TreeNode`
    # annotation is also undefined in this module. Code kept byte-for-byte.
    __lowerCamelCase : TreeNode = build_tree()
    print(prompt("""Pre Order Traversal"""))
    pre_order(node)
    print(prompt() + """\n""")
    print(prompt("""In Order Traversal"""))
    in_order(node)
    print(prompt() + """\n""")
    print(prompt("""Post Order Traversal"""))
    post_order(node)
    print(prompt() + """\n""")
    print(prompt("""Level Order Traversal"""))
    level_order(node)
    print(prompt() + """\n""")
    print(prompt("""Actual Level Order Traversal"""))
    level_order_actual(node)
    print("""*""" * 50 + """\n""")
    print(prompt("""Pre Order Traversal - Iteration Version"""))
    pre_order_iter(node)
    print(prompt() + """\n""")
    print(prompt("""In Order Traversal - Iteration Version"""))
    in_order_iter(node)
    print(prompt() + """\n""")
    print(prompt("""Post Order Traversal - Iteration Version"""))
    post_order_iter(node)
    print(prompt())
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCamelCase_ ( unittest.TestCase , A ):
    """Tests for the transformers `text-to-speech` tool (requires torch).

    NOTE(review): identifiers were mangled — the mixin base `A` is undefined
    here (presumably `ToolTesterMixin`, imported above), all three methods
    share the name `__A` (only the last binding survives on the class), and
    results are bound to `__magic_name__` while the asserts read
    `self.tool` / `result` / `resulting_tensor`. Code kept byte-for-byte;
    only comments/docstrings were added.
    """

    def __A ( self : Optional[int] ) -> Any:
        # setUp-style helper: load and initialise the tool once.
        __magic_name__ = load_tool("text-to-speech" )
        self.tool.setup()

    def __A ( self : Union[str, Any] ) -> int:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        __magic_name__ = self.tool("hey" )
        __magic_name__ = result.to_raw()
        # compare the first three raw audio samples against known seeded output
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )

    def __A ( self : List[str] ) -> int:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        __magic_name__ = self.tool("hey" )
        __magic_name__ = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 664 | 0 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
a_ = {'UserAgent': UserAgent().random}
def extract_user_profile(script):
    """Extract the embedded user JSON from an Instagram page <script> tag.

    Renamed from a mangled identifier — `get_json` below calls
    `extract_user_profile(...)`. The script's text starts somewhere before the
    shared-data JSON object and ends with a trailing ';' that is sliced off.
    """
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """Scrape public profile information for one Instagram account.

    Renamed from a mangled identifier — the smoke test and main guard construct
    `InstagramUser(...)`. Property names restored from their reads in
    `__str__`/`__repr__` and the assertions of the smoke test below. All
    properties read the `user_data` dict fetched once in `__init__`.
    """

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self):
        """Fetch the profile page and return the embedded user JSON dict."""
        # `a_` is this module's user-agent headers constant (mangled name).
        html = requests.get(self.url, headers=a_).text
        scripts = BeautifulSoup(html, 'html.parser').find_all('script')
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            # page layout varies: fall back to the previous script tag
            return extract_user_profile(scripts[3])

    def __repr__(self):
        return f"{self.__class__.__name__}(\'{self.username}\')"

    def __str__(self):
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self):
        return self.user_data["username"]

    @property
    def fullname(self):
        return self.user_data["full_name"]

    @property
    def biography(self):
        return self.user_data["biography"]

    @property
    def email(self):
        return self.user_data["business_email"]

    @property
    def website(self):
        return self.user_data["external_url"]

    @property
    def number_of_followers(self):
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self):
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self):
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self):
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self):
        return self.user_data["is_verified"]

    @property
    def is_private(self):
        return self.user_data["is_private"]
def __lowercase(username="github"):
    """Live smoke test of the scraper against a known account.

    Skipped on CI; performs a real network fetch. Local names restored from the
    mangled original's reads; the constructor name matches the class above.
    """
    import os

    if os.environ.get('CI'):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith('https://instagram.')
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the instance is bound to `a_` while every print below reads
    # `instagram_user` — the binding name looks mangled; presumably
    # `instagram_user = InstagramUser('github')` upstream. Code kept byte-for-byte.
    a_ = InstagramUser('github')
    print(instagram_user)
    print(F"""{instagram_user.number_of_posts = }""")
    print(F"""{instagram_user.number_of_followers = }""")
    print(F"""{instagram_user.number_of_followings = }""")
    print(F"""{instagram_user.email = }""")
    print(F"""{instagram_user.website = }""")
    print(F"""{instagram_user.profile_picture_url = }""")
    print(F"""{instagram_user.is_verified = }""")
    print(F"""{instagram_user.is_private = }""")
| 417 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Tokens are split on any character that is not alphanumeric or underscore.
# Names restored from their reads in the functions below (NON_ALPHA,
# MIN_NUM_TOKENS, NUM_PERM); the mangled original bound all three to one
# throwaway name.
NON_ALPHA = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens):
    """Build a MinHash fingerprint from a token list, or None if too short.

    Renamed from a mangled identifier — `_compute_min_hash` below calls
    `get_min_hash(...)`. Files with fewer than MIN_NUM_TOKENS tokens are
    skipped (returns None).
    """
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code):
    """Return the set of non-empty identifier-ish tokens in a code string.

    Renamed from a mangled identifier — `jaccard_similarity` below calls
    `get_tokens(...)`.
    """
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    """MinHash-LSH index that groups near-duplicate files into clusters.

    Renamed from a mangled identifier — `make_duplicate_clusters` constructs
    `DuplicationIndex(duplication_jaccard_threshold=...)`. Attribute names
    restored from their reads in the methods; the cluster container is
    presumably `defaultdict(set)` upstream (the mangled original passed an
    undefined factory) — confirm.
    """

    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key, min_hash):
        """Insert a file's MinHash; attach it to an existing cluster if close.

        Renamed from a mangled identifier — `make_duplicate_clusters` calls
        `di.add(...)`.
        """
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # no existing cluster among the near-duplicates: start one
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self):
        """Return clusters as lists of {base_index, repo_name, path} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def __A(self, filepath):
        # Dump the clusters as JSON; presumably named `save` upstream — the
        # mangled name is kept since no call site grounds a rename.
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """Tokenise one dataset row and return its key tuple plus MinHash.

    Renamed from a mangled identifier — `minhash_iter` passes
    `_compute_min_hash` to the process pool. Returns None implicitly when the
    row is too short for a fingerprint.
    """
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    """Yield (key, MinHash) pairs for dataset rows, computed in parallel.

    Renamed from a mangled identifier — `make_duplicate_clusters` calls
    `minhash_iter(...)`. Rows too short for a fingerprint are dropped.
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    """Index every file's MinHash and return the resulting duplicate clusters.

    Renamed from a mangled identifier — `__snake_case` (deduplicate) below
    calls `make_duplicate_clusters(...)`.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1, code2):
    """Jaccard similarity of the token sets of two code strings.

    Renamed from a mangled identifier — `_find_cluster_extremes_shared` calls
    `jaccard_similarity(...)`.
    """
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
# Module-level handle shared with pool workers; set by `find_extremes` via
# `global _shared_dataset` (read in `_find_cluster_extremes_shared`).
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Pick cluster representatives ("extremes") and count copies of each.

    Renamed from a mangled identifier — `find_extremes` builds a partial over
    `_find_cluster_extremes_shared`. Elements similar (>= threshold) to an
    already-kept extreme increment that extreme's `copies`; otherwise they
    become a new extreme with `copies = 1`. Reads `_shared_dataset` set by
    `find_extremes`.
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Compute extremes for every cluster in parallel worker processes.

    Renamed from a mangled identifier — `__snake_case` (deduplicate) below
    calls `find_extremes(...)`. The dataset is published through the module
    global `_shared_dataset` so forked workers can read it without pickling.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    worker = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(worker, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Deduplicate ``dataset``, keeping one "extreme" file per duplicate cluster.

    Returns:
        (ds_filter, duplicate_clusters): the filtered dataset and the clusters,
        where each cluster element is annotated with ``is_extreme`` and, for
        extremes, the number of ``copies`` it represents.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    # Drop every duplicated file that is not the kept extreme of its cluster.
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
| 664 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the DeiT model: submodule name -> exported names.
# (The branches below must extend this dict — binding each list to a throwaway
# variable left `_import_structure` undefined for _LazyModule.)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    # Install a lazy module proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 384 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# The conversion relies on internals of these exact versions.
if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Sentence used to sanity-check that the converted model matches the original.
SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert an original Bort checkpoint (MXNET/Gluonnlp) into a 🤗 BERT checkpoint.

    Loads the Gluon Bort model, copies every parameter into a freshly built
    ``BertForMaskedLM``, saves it to ``pytorch_dump_folder_path`` and verifies
    that both models produce (approximately) the same hidden states.
    """
    # Hyper-parameters of the released bort_4_8_768_1024 architecture.
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter                                                | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta`                                      | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma`                                     | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight`                                      | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight`                                          | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias`     | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight`   | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias`   | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias`   | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias`                   | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight`                 | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta`                  | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma`                 | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias`                   | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight`                 | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta`              | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma`             | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias`                        | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight`                      | `bert.encoder.layer.*.output.dense.weight`

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    sample_text = "The Nymphenburg Palace is a beautiful palace in Munich!"
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(sample_text)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(sample_text, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 664 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for MobileBERT: submodule name -> exported names.
# (Restored `_import_structure` — the optional branches previously rebound a
# throwaway variable, leaving the dict unused and the name undefined below.)
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    # Install a lazy module proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 169 |
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative ints as a binary string.

    >>> binary_and(25, 19)
    '0b10001'
    >>> binary_and(0, 0)
    '0b0'

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    # Pad both to the same width, then AND digit by digit.
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 664 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for RoBERTa: submodule name -> exported names.
# (Restored `_import_structure` — the optional branches previously rebound a
# throwaway variable, leaving the dict unused and the name undefined below.)
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    # Install a lazy module proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 461 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
# Module-level logging state. These names are referenced throughout the module
# (e.g. `_lock`, `_default_handler`, `log_levels`) but were bound to throwaway
# placeholders, leaving them undefined.
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

# Whether tqdm progress bars are globally enabled.
_tqdm_active = True
def _get_default_logging_level():
    """Return the default logging level, honoring the TRANSFORMERS_VERBOSITY env var.

    Falls back to ``_default_log_level`` when the variable is unset or invalid.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        logging.getLogger().warning(
            f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
            f"has to be one of: { ', '.join(log_levels.keys()) }"
        )
    return _default_log_level
def _get_library_name() -> str:
    """Return the top-level package name this module belongs to."""
    return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
    """Return the root logger of the library."""
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    """Attach the library's default stderr StreamHandler to the root logger, once.

    Thread-safe and idempotent: subsequent calls return immediately.
    """
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        # Keep library records out of the (possibly configured) root logger.
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    """Undo ``_configure_library_root_logger``: detach the handler, reset level."""
    global _default_handler

    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    """Return the mapping from level names (``"debug"`` …) to ``logging`` levels."""
    return log_levels
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library root.

    Ensures the library root logger is configured first.
    """
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity() -> int:
    """Return the current effective level of the library's root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level for the library's root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    """Set the verbosity to the INFO level."""
    return set_verbosity(INFO)
def set_verbosity_warning():
    """Set the verbosity to the WARNING level."""
    return set_verbosity(WARNING)
def set_verbosity_debug():
    """Set the verbosity to the DEBUG level."""
    return set_verbosity(DEBUG)
def set_verbosity_error():
    """Set the verbosity to the ERROR level."""
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """Detach the library's default handler from its root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler() -> None:
    """Re-attach the library's default handler to its root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
    """Add ``handler`` to the library's root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
    """Remove ``handler`` from the library's root logger (must be attached)."""
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    """Disable propagation of library log records to ancestor loggers.

    Note that propagation is disabled by default.
    """
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    """Enable propagation of library log records to ancestor loggers.

    Disable the library's default handler as well to avoid double logging.
    """
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    """Apply an explicit ``[LEVEL|file:line] time >> message`` format to all handlers."""
    handlers = _get_library_root_logger().handlers

    # The formatter is identical for every handler — build it once, outside the loop.
    formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
    for handler in handlers:
        handler.setFormatter(formatter)
def reset_format() -> None:
    """Reset the formatting of every handler on the library's root logger."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        # None restores the default (message-only) formatting.
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """Like ``Logger.warning``, but silenced by TRANSFORMERS_NO_ADVISORY_WARNINGS."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)
__magic_name__ : int =warning_advice
@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Emit a warning identical to ``Logger.warning``, but only once per unique call.

    Deduplication relies on ``lru_cache`` keying on (self, args); unhashable
    arguments would defeat it.
    """
    self.warning(*args, **kwargs)
__magic_name__ : Optional[int] =warning_once
class EmptyTqdm:
    """Dummy tqdm that silently accepts the same API but does nothing."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        # Keep the iterable (first positional arg) so iteration still works.
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return a no-op function for any tqdm method (update, set_description, …)."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    """Factory that yields a real tqdm bar or an EmptyTqdm, per ``_tqdm_active``."""

    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
__magic_name__ : List[Any] =_tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return whether tqdm progress bars are currently enabled."""
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bar():
    """Enable tqdm progress bars, here and in huggingface_hub."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bar():
    """Disable tqdm progress bars, here and in huggingface_hub."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 664 | 0 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

# All config classes that support question answering, and their model-type strings.
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    # Enum is required: later code indexes it by name (Split[mode]) and
    # compares members (mode == Split.dev).
    train = "train"
    dev = "dev"
class lowerCamelCase ( __lowerCamelCase ):
    """
    PyTorch dataset wrapping SQuAD examples as pre-tokenized features.

    Features are computed once, written to a versioned cache file (guarded by a
    file lock so only one distributed worker does the work) and re-loaded from
    that cache on subsequent runs.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ) -> None:
        """
        Args:
            args: data/tokenization hyper-parameters (paths, lengths, strides...).
            tokenizer: tokenizer used to convert examples to features.
            limit_length: kept for API compatibility (not used here).
            mode: dataset split, either a `Split` member or its name ("train"/"dev").
            is_language_sensitive: if True, `__getitem__` also emits per-token language ids.
            cache_dir: where to read/write the features cache (defaults to `args.data_dir`).
            dataset_format: `return_dataset` flag forwarded to `squad_convert_examples_to_features`.

        Fixed: the previous signature repeated the parameter name `lowercase`
        (a SyntaxError) and every attribute read later (`self.args`,
        `self.features`, ...) was assigned to a throwaway local instead of self.
        """
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        # NOTE(review): both branches build the same processor class; upstream
        # uses SquadV2Processor vs SquadV1Processor here — confirm the intended classes.
        self.processor = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode

        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]",
                    time.time() - start,
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        """Build the model input dict for feature `i`."""
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        # These model families do not use token type ids.
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                # Fixed: `torch.intaa` is not a real dtype; language ids are int64.
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
"""Lazy import structure for the FocalNet model (configuration + torch modeling)."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public symbols; consumed lazily by _LazyModule below.
# Fixed: the structure was assigned to a throwaway name, leaving
# `_import_structure` undefined at the `_LazyModule(...)` call, and the lazy
# module was never installed into `sys.modules`.
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters shared by the DPT image-processor tests.

    Fixed: every constructor parameter was named `_lowerCAmelCase` (duplicate
    argument names are a SyntaxError) and the values were bound to a throwaway
    local instead of instance attributes; the class is instantiated below as
    `DPTImageProcessingTester(self)`, so it is renamed accordingly.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # None defaults avoid shared mutable default arguments.
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct a `DPTImageProcessor` under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class UpperCamelCase__ (ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `DPTImageProcessor` with PIL, numpy and torch inputs.

    Fixed: every method was named `UpperCamelCase_` so later defs shadowed
    earlier ones, `setUp` discarded the tester into a local, and the mixin
    base was the undefined name `a` (the mixin is imported at the top of the
    file). Method/attribute names are restored from their call sites.
    """

    # `None` when vision deps are missing; the decorators skip the tests then.
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def _check_call(self, image_inputs, expected_input_type):
        """Shared body for the three `test_call_*` tests below."""
        for image in image_inputs:
            self.assertIsInstance(image, expected_input_type)

        image_processing = self.image_processing_class(**self.image_processor_dict)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pil(self):
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        self._check_call(image_inputs, Image.Image)

    def test_call_numpy(self):
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        self._check_call(image_inputs, np.ndarray)

    def test_call_pytorch(self):
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        self._check_call(image_inputs, torch.Tensor)
"""Lazy import structure for the Longformer model (config, tokenizers, torch and TF modeling)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule name -> public symbols; consumed lazily by _LazyModule below.
# Fixed: each section was assigned to a throwaway name, leaving
# `_import_structure` undefined at the `_LazyModule(...)` call, and the lazy
# module was never installed into `sys.modules`.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Lazy import structure for the Informer time-series model."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public symbols; consumed lazily by _LazyModule below.
# Fixed: the structure was assigned to a throwaway name, leaving
# `_import_structure` undefined at the `_LazyModule(...)` call, and the lazy
# module was never installed into `sys.modules`.
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Pillow resampling-filter lookup, compatible with Pillow before and after 9.1."""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image

# Pillow 9.1 moved the resampling constants into `PIL.Image.Resampling`.
# Fixed: the dict carried bogus `: str` / `: Tuple` annotations (the value is a
# dict, and `Tuple` may not be imported, raising NameError on the legacy branch).
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    __magic_name__ = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    __magic_name__ = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
def __snake_case(lowerCamelCase_: Optional[Any]):
    """Denormalize a [-1, 1] image batch tensor to [0, 1] and convert it to PIL images.

    Expects a (batch, channels, height, width) float tensor — TODO confirm with callers.
    """
    clamped = (lowerCamelCase_ / 2 + 0.5).clamp(0, 1)
    as_numpy = clamped.cpu().permute(0, 2, 3, 1).float().numpy()
    return numpy_to_pil(as_numpy)
def numpy_to_pil(lowerCamelCase_: Optional[Any]):
    """Convert a float numpy image batch with values in [0, 1] to a list of PIL images.

    A 3-D input is treated as a single image and promoted to a batch of one.
    Fixed: this def reused the name `__snake_case`, shadowing the tensor
    converter above and leaving its `numpy_to_pil(...)` call unresolved.
    """
    images = lowerCamelCase_
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images


# Keep the previous (shadowing) binding intact for any external callers.
__snake_case = numpy_to_pil
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    """Simple FIFO queue over a list with head/tail cursors (popped slots are not reclaimed).

    Fixed: every method was named `lowercase_` (so only the last survived) and
    state was bound to a throwaway local instead of `self`; the tree printer
    below uses `MyQueue()`, `push`, `pop` and `is_empty`.
    """

    def __init__(self):
        self.data = []
        self.head = 0
        self.tail = 0

    def is_empty(self):
        return self.head == self.tail

    def push(self, data):
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self):
        """Number of elements currently queued."""
        return self.tail - self.head

    def print_queue(self):
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    """A single AVL-tree node: a value, two child links and a cached subtree height.

    Fixed: the class name was mangled (`insert_node` constructs `MyNode(...)`),
    all accessors shared one name, and attributes were bound to locals.
    """

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
        self.height = 1  # a fresh node is a leaf

    def get_data(self):
        return self.data

    def get_left(self):
        return self.left

    def get_right(self):
        return self.right

    def get_height(self):
        return self.height

    def set_data(self, data):
        self.data = data

    def set_left(self, node):
        self.left = node

    def set_right(self, node):
        self.right = node

    def set_height(self, height):
        self.height = height
def get_height(node):
    """Height of the subtree rooted at `node`; an empty subtree has height 0."""
    if node is None:
        return 0
    return node.get_height()
def my_max(a, b):
    """Return the larger of `a` and `b` (fixed: both parameters were named `A`)."""
    if a > b:
        return a
    return b
def right_rotation(node):
    """Rotate `node` right: lift its left child and return the new subtree root.

    NOTE(review): the printed label says "left rotation" — kept byte-for-byte,
    but it looks swapped relative to the operation; confirm before relabeling.
    """
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    ret.set_height(my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1)
    return ret
def left_rotation(node):
    """Rotate `node` left: lift its right child and return the new subtree root.

    NOTE(review): the printed label says "right rotation" — kept byte-for-byte,
    but it looks swapped relative to the operation; confirm before relabeling.
    """
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    ret.set_height(my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1)
    return ret
def lr_rotation(node):
    """Left-Right double rotation: rotate the left child left, then `node` right."""
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)
def rl_rotation(node):
    """Right-Left double rotation: rotate the right child right, then `node` left."""
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node, data):
    """Insert `data` into the AVL subtree rooted at `node`, rebalance, and
    return the (possibly new) subtree root."""
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    node.set_height(my_max(get_height(node.get_right()), get_height(node.get_left())) + 1)
    return node
def get_right_most(root):
    """Return the maximum value in the subtree: follow right links to the end."""
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most(root):
    """Return the minimum value in the subtree: follow left links to the end."""
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root, data):
    """Delete `data` from the AVL subtree rooted at `root`, rebalance, and
    return the new subtree root (None if the tree becomes empty)."""
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            # Two children: replace with the in-order successor, then delete it
            # from the right subtree.
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    # Rebalance using the child links captured before the recursive delete.
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    root.set_height(my_max(get_height(root.get_right()), get_height(root.get_left())) + 1)
    return root
class AVLtree:
    """Self-balancing binary search tree; the demo code below uses
    `AVLtree()`, `insert`, `del_node` and `str(tree)`.

    Fixed: the class name was mangled and all methods shared the name
    `lowercase_`, shadowing each other.
    """

    def __init__(self):
        self.root = None

    def get_height(self):
        # Delegates to the module-level get_height() helper.
        return get_height(self.root)

    def insert(self, data):
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data):
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self):  # a level traversale, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def __a ( ) -> List[str]:
'''simple docstring'''
import doctest
doctest.testmod()
if __name__ == "__main__":
    _test()

    # Demo: insert 0..9 in random order, print the tree, then delete them all.
    # Fixed: the tree was bound to a mangled name while the loops used `t`,
    # and the demo ran at import time; it now lives under the __main__ guard.
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
# Fixed: the pipeline methods below call `logger.warning(...)`, so the module
# logger must be bound to the name `logger`.
logger = logging.get_logger(__name__)
@add_end_docstrings(
    A,
    r'''
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
''',
)
class UpperCamelCase_ ( A ):
    """Masked-language-model fill-mask pipeline.

    Fixed: every method was defined as `__A`, which Python name-mangles to
    `_UpperCamelCase___A` inside the class, so calls like
    `self.get_masked_index(...)` could never resolve; locals were also bound
    to one throwaway name each. Method and local names are restored from
    their call sites.
    """

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        """Indices of the mask token in `input_ids`, per framework."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> None:
        """Raise if `input_ids` contains no mask token."""
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        # Keep the input ids around for postprocessing.
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        """Resolve target words to vocabulary ids (tokenizing out-of-vocab targets)."""
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        return np.array(target_ids)

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        # Unwrap single-element list inputs for convenience.
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCamelCase__(BaseOutput):
    """Output of the SDE-VE scheduler step (the code below returns a sample and
    its pre-noise mean).

    Fixed: both fields shared one name, collapsing the dataclass to a single
    field, and the base was an undefined name while `BaseOutput` (imported
    above) went unused.
    """

    # Denoised sample for the next diffusion step.
    prev_sample: torch.FloatTensor
    # Sample before the final noise perturbation was added.
    prev_sample_mean: torch.FloatTensor
class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ ):
    '''Variance-exploding (VE) SDE scheduler in the style of diffusers'
    `ScoreSdeVeScheduler`: a predictor step that integrates the reverse-time
    SDE and a Langevin-dynamics corrector step.

    NOTE(review): this block is machine-garbled — every method's parameters
    share one name (`UpperCamelCase__`, a SyntaxError as written), assignments
    target throwaway locals (`lowerCamelCase : ...`) instead of attributes, and
    bodies reference names (`sigma_max`, `sample`, `timesteps`, ...) that the
    signatures no longer bind.  Comments below describe the intended behaviour
    reconstructed from the surviving body code and the in-code equation notes;
    confirm against the original diffusers source before relying on them.
    '''
    # presumably `order = 1` (first-order scheduler) in the original API
    lowerCamelCase_ : Any = 1
    @register_to_config
    def __init__( self , UpperCamelCase__ = 2000 , UpperCamelCase__ = 0.15 , UpperCamelCase__ = 0.01 , UpperCamelCase__ = 1348.0 , UpperCamelCase__ = 1e-5 , UpperCamelCase__ = 1 , ) -> List[Any]:
        # Intended parameters (from body/defaults): num_train_timesteps=2000,
        # snr=0.15, sigma_min=0.01, sigma_max=1348.0, sampling_eps=1e-5,
        # correct_steps=1 — TODO confirm against diffusers.
        # standard deviation of the initial noise distribution
        lowerCamelCase : Dict = sigma_max
        # setable values
        lowerCamelCase : Dict = None
        self.set_sigmas(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> torch.FloatTensor:
        # `scale_model_input` equivalent: the VE-SDE needs no input scaling,
        # so the sample is returned untouched (note: returns the free name
        # `sample`, not the garbled parameter).
        return sample
    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None ) -> Tuple:
        # `set_timesteps` equivalent: continuous timesteps from 1 down to
        # `sampling_eps` (arguments garbled to `_lowerCamelCase`).
        lowerCamelCase : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        lowerCamelCase : List[str] = torch.linspace(1 , _lowerCamelCase , _lowerCamelCase , device=_lowerCamelCase )
    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None ) -> List[str]:
        # `set_sigmas` equivalent: geometric sigma schedule
        # sigma(t) = sigma_min * (sigma_max / sigma_min) ** t, plus a discrete
        # per-timestep table (`discrete_sigmas` below — assignment target
        # garbled here).
        lowerCamelCase : int = sigma_min if sigma_min is not None else self.config.sigma_min
        lowerCamelCase : Union[str, Any] = sigma_max if sigma_max is not None else self.config.sigma_max
        lowerCamelCase : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(_lowerCamelCase , _lowerCamelCase )
        lowerCamelCase : Dict = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        lowerCamelCase : Union[str, Any] = torch.exp(torch.linspace(math.log(_lowerCamelCase ) , math.log(_lowerCamelCase ) , _lowerCamelCase ) )
        lowerCamelCase : Union[str, Any] = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
        # `get_adjacent_sigma`: sigma of the previous discrete timestep, with
        # zero at timestep 0 (start of the chain).
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True , ) -> Union[SdeVeOutput, Tuple]:
        # Predictor step (`step_pred` equivalent): one reverse-SDE Euler step.
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # Broadcast the scalar timestep over the batch.
        lowerCamelCase : Any = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        lowerCamelCase : Union[str, Any] = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        lowerCamelCase : List[str] = timesteps.to(self.discrete_sigmas.device )
        lowerCamelCase : Any = self.discrete_sigmas[timesteps].to(sample.device )
        lowerCamelCase : int = self.get_adjacent_sigma(_lowerCamelCase , _lowerCamelCase ).to(sample.device )
        # VE-SDE has zero drift; the diffusion term comes from the sigma gap.
        lowerCamelCase : Dict = torch.zeros_like(_lowerCamelCase )
        lowerCamelCase : Optional[Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        lowerCamelCase : List[Any] = diffusion.flatten()
        # Unsqueeze diffusion until it broadcasts against the sample shape.
        while len(diffusion.shape ) < len(sample.shape ):
            lowerCamelCase : Dict = diffusion.unsqueeze(-1 )
        lowerCamelCase : Union[str, Any] = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        lowerCamelCase : Union[str, Any] = randn_tensor(
            sample.shape , layout=sample.layout , generator=_lowerCamelCase , device=sample.device , dtype=sample.dtype )
        lowerCamelCase : str = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        lowerCamelCase : List[str] = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=_lowerCamelCase , prev_sample_mean=_lowerCamelCase )
    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True , ) -> Union[SchedulerOutput, Tuple]:
        # Corrector step (`step_correct` equivalent): Langevin-dynamics update
        # with step size derived from the signal-to-noise ratio `config.snr`.
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        lowerCamelCase : List[str] = randn_tensor(sample.shape , layout=sample.layout , generator=_lowerCamelCase ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        lowerCamelCase : Optional[int] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        lowerCamelCase : Optional[int] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        lowerCamelCase : Tuple = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        lowerCamelCase : Optional[Any] = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        lowerCamelCase : List[str] = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            lowerCamelCase : Union[str, Any] = step_size.unsqueeze(-1 )
        lowerCamelCase : Optional[int] = sample + step_size * model_output
        lowerCamelCase : Tuple = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=_lowerCamelCase )
    def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> torch.FloatTensor:
        # `add_noise` equivalent: perturb `original_samples` with noise scaled
        # by the per-timestep sigma.
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        lowerCamelCase : Any = timesteps.to(original_samples.device )
        lowerCamelCase : Any = self.discrete_sigmas.to(original_samples.device )[timesteps]
        lowerCamelCase : Union[str, Any] = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(_lowerCamelCase ) * sigmas[:, None, None, None]
        )
        lowerCamelCase : Optional[int] = noise + original_samples
        return noisy_samples
    def __len__( self ) -> Dict:
        # Length of the scheduler == number of training timesteps.
        return self.config.num_train_timesteps
| 311 |
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any `k` consecutive elements of `array`.

    Uses a fixed-size sliding window: seed the window with the first `k`
    elements, then slide it one position at a time, updating the running sum
    in O(1). Overall O(n) time, O(1) extra space.

    Fixes vs. the garbled original: both parameters shared one name (a
    SyntaxError), the body read undefined names (`k`, `max_sum`,
    `current_sum`), and the function name did not match the `max_sum_in_array`
    call in the `__main__` block below.

    Args:
        array: the input sequence.
        k: window size.

    Returns:
        The maximum window sum (0 when k == 0).

    Raises:
        ValueError: if `k` is negative or larger than `len(array)`.
    """
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # Slide right: drop array[i], add array[i + k].
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    # Exercise the sliding-window helper on a random array / window size.
    # (Fixes the garbled original, which bound both values to `__magic_name__`
    # and then referenced undefined `array` and `k`.)
    array = [randint(-10_00, 10_00) for i in range(1_00)]
    k = randint(0, 1_10)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}")
| 664 | 0 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Maps submodule name -> public names it exports; consumed by `_LazyModule`
# so heavy submodules are only imported on first attribute access.
# (Fixes the garbled original, which bound this dict to `a_`, clobbered `a_`
# with the modeling-name list, and then passed an undefined
# `_import_structure` to `_LazyModule` without installing it in sys.modules.)
_import_structure = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply do not expose the modeling classes.
    pass
else:
    _import_structure['modeling_van'] = [
        'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VanForImageClassification',
        'VanModel',
        'VanPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; attributes resolve on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. (In the garbled original both values below were assigned to
# the same name `__magic_name__`, so the logger was clobbered by the dict.)
logger = logging.get_logger(__name__)

# Checkpoint -> config-url map; intentionally empty here.
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class UpperCamelCase_ ( A ):
    """Configuration class mirroring `transformers.LlamaConfig`.

    Fixes vs. the garbled original: the `__init__` signature repeated one
    parameter name (a SyntaxError), both class attributes were bound to the
    same name (so `model_type` was lost), every attribute was assigned to a
    local `__magic_name__` instead of `self.*`, and the method called as
    `self._rope_scaling_validation()` was named `__A`.
    """

    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__(
        self,
        vocab_size=3_20_00,
        hidden_size=40_96,
        intermediate_size=1_10_08,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=20_48,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility: grouped-query attention defaults to
        # standard multi-head attention when not specified
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate `rope_scaling`: None, or a dict with keys `type`
        ("linear" | "dynamic") and `factor` (float > 1).

        Raises:
            ValueError: on any malformed `rope_scaling` value.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
| 664 | 0 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Integration tests for the `accelerate launch` / `accelerate test` CLI:
    each test shells out to the CLI via `execute_subprocess_async`.

    NOTE(review): every class attribute below is bound to the same garbled
    name `a_`, so only the last assignment survives, and the bodies reference
    `mod_file`, `config_folder`, `config_file` which are undefined as written.
    Presumably these were `mod_file`, `test_file_path`, `base_cmd`,
    `config_folder`, `config_file`, `config_path`, `changed_path`,
    `test_config_path` — confirm against the original accelerate test file.
    """
    a_ = inspect.getfile(accelerate.test_utils )
    a_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] )
    a_ = ['''accelerate''', '''launch''']
    a_ = Path.home() / '''.cache/huggingface/accelerate'''
    a_ = '''default_config.yaml'''
    a_ = config_folder / config_file
    a_ = config_folder / '''_default_config.yaml'''
    a_ = Path("tests/test_configs" )
    # setUpClass equivalent: stash any user config aside so the CLI runs
    # with no pre-existing default config.
    @classmethod
    def _lowercase ( cls : Tuple ):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )
    # tearDownClass equivalent: restore the stashed user config.
    @classmethod
    def _lowercase ( cls : Dict ):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )
    # Launch with no config file; adds --multi_gpu when >1 CUDA device.
    # NOTE(review): result bound to a throwaway local but read as `cmd`.
    def _lowercase ( self : str ):
        snake_case__ : Optional[int] = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
    # Launch once per YAML config under tests/test_configs.
    # NOTE(review): `_lowerCamelCase` here is undefined; presumably `config`.
    def _lowercase ( self : Tuple ):
        for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
            with self.subTest(config_file=_lowerCamelCase ):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(_lowerCamelCase ), self.test_file_path] , env=os.environ.copy() )
    # Smoke-test the `accelerate test` subcommand.
    def _lowercase ( self : Dict ):
        execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Tests for the `accelerate tpu-config` subcommand: each test invokes the
    CLI with `--debug` via `run_command` and asserts the gcloud command line it
    would run appears in the captured stdout.

    NOTE(review): as in the class above, all attributes are bound to one
    garbled name `a_` (only the last survives) and `return_stdout=` /
    `assertIn` second arguments reference the undefined `_lowerCamelCase`;
    presumably the attributes were `tpu_name`, `tpu_zone`, `command`, `cmd`,
    `base_output`, `command_file`, `gcloud` — confirm against the original.
    """
    a_ = '''test-tpu'''
    a_ = '''us-central1-a'''
    a_ = '''ls'''
    a_ = ['''accelerate''', '''tpu-config''']
    a_ = '''cd /usr/share'''
    a_ = '''tests/test_samples/test_command_file.sh'''
    a_ = '''Running gcloud compute tpus tpu-vm ssh'''
    # --command with explicit --tpu_zone/--tpu_name flags
    def _lowercase ( self : Dict ):
        snake_case__ : Union[str, Any] = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=_lowerCamelCase , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _lowerCamelCase , )
    # --command plus a 0.12.0-era config file
    def _lowercase ( self : Tuple ):
        snake_case__ : str = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] , return_stdout=_lowerCamelCase , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _lowerCamelCase , )
    # latest config file only: commands come from the config itself
    def _lowercase ( self : Optional[Any] ):
        snake_case__ : Optional[Any] = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=_lowerCamelCase )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _lowerCamelCase , )
    # config file with a --command override
    def _lowercase ( self : Union[str, Any] ):
        snake_case__ : Tuple = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=_lowerCamelCase , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _lowerCamelCase , )
    # multiple --command flags are chained with ';'
    def _lowercase ( self : Optional[Any] ):
        snake_case__ : List[Any] = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                "echo \"Hello World\"",
                "--debug",
            ] , return_stdout=_lowerCamelCase , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , _lowerCamelCase , )
    # --command_file: commands read from a shell script
    def _lowercase ( self : Optional[Any] ):
        snake_case__ : Union[str, Any] = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=_lowerCamelCase , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _lowerCamelCase , )
    # --command_file combined with the 0.12.0 config and explicit zone/name
    def _lowercase ( self : Tuple ):
        snake_case__ : Dict = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] , return_stdout=_lowerCamelCase , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _lowerCamelCase , )
    # --install_accelerate prepends `pip install accelerate -U`
    def _lowercase ( self : Tuple ):
        snake_case__ : Optional[int] = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=_lowerCamelCase , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , _lowerCamelCase , )
    # --accelerate_version pins the pip install
    def _lowercase ( self : Optional[Any] ):
        snake_case__ : Tuple = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ] , return_stdout=_lowerCamelCase , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , _lowerCamelCase , )
| 297 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.3_1_4_4_6_2  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal-gas pressure P = nRT / V.

    (Fixes vs. the garbled original: the constant was bound to
    `__magic_name__` while the body read `UNIVERSAL_GAS_CONSTANT`, both
    functions shared one name, and all three parameters repeated one name —
    a SyntaxError.)

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal-gas volume V = nRT / P.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 664 | 0 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
a_ = logging.get_logger(__name__)
class _lowercase ( snake_case_ ):
    """Composite configuration for a vision-encoder / text-decoder model,
    mirroring `transformers.VisionEncoderDecoderConfig`.

    Fixes vs. the garbled original: the two class attributes shared one name
    (so `model_type` was lost), `__init__` declared `**snake` but read
    `kwargs`, sub-configs were bound to throwaway locals instead of
    `self.encoder`/`self.decoder`, and all three methods shared one name.
    """

    model_type = '''vision-encoder-decoder'''
    is_composition = True

    def __init__(self, **kwargs):
        """Build from required `encoder` and `decoder` sub-config dicts.

        Raises:
            ValueError: if either sub-configuration is missing.
        """
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
        encoder_config = kwargs.pop('encoder')
        encoder_model_type = encoder_config.pop('model_type')
        decoder_config = kwargs.pop('decoder')
        decoder_model_type = decoder_config.pop('model_type')

        # Rehydrate the concrete config classes from their model_type tags.
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        """Build a composite config from two instantiated sub-configs,
        marking the decoder for cross-attention."""
        logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, recursing into the sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class _lowercase ( snake_case_ ):
    # NOTE(review): the three properties below all share one garbled name, so
    # only the last survives; presumably they were `inputs`,
    # `atol_for_validation` and `outputs` of the encoder ONNX config —
    # confirm against transformers' VisionEncoderDecoder ONNX configs.
    # Minimum torch version for ONNX export (attribute name garbled;
    # presumably `torch_onnx_minimum_version`).
    lowercase = version.parse('1.11' )
    @property
    def SCREAMING_SNAKE_CASE__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
        """ONNX input spec: `pixel_values` with dynamic batch/channel/H/W axes."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
        """ONNX output spec: `last_hidden_state` with dynamic batch/sequence axes."""
        return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class _lowercase ( snake_case_ ):
    # Decoder-side ONNX config: declares decoder inputs and builds dummy
    # inputs (with encoder hidden states) for export tracing.
    @property
    def SCREAMING_SNAKE_CASE__ ( self : int ) -> Mapping[str, Mapping[int, str]]:
        """ONNX input spec for the decoder.

        NOTE(review): the three dicts below are bound to throwaway locals and
        `common_inputs` is never populated before being returned; presumably
        they were `common_inputs['input_ids']`, `['attention_mask']` and
        `['encoder_hidden_states']` — confirm against the original source.
        """
        UpperCamelCase_ : int = OrderedDict()
        UpperCamelCase_ : Optional[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        UpperCamelCase_ : Optional[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        UpperCamelCase_ : List[str] = {0: 'batch', 1: 'encoder_sequence'}
        return common_inputs
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : "PreTrainedTokenizerBase" , snake_case : int = -1 , snake_case : int = -1 , snake_case : bool = False , snake_case : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
        """Build dummy decoder inputs plus zeroed encoder hidden states.

        NOTE(review): the parameters all share the garbled name `snake` (a
        SyntaxError as written) and the body references undefined
        `_lowerCamelCase`; presumably the original forwarded the tokenizer,
        batch_size, seq_length, is_pair and framework to
        `super().generate_dummy_inputs` — confirm before relying on this.
        """
        import torch
        UpperCamelCase_ : str = OrderedDict()
        UpperCamelCase_ : List[Any] = super().generate_dummy_inputs(
            _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
        # Shape the zero tensor of encoder states after the dummy input_ids.
        UpperCamelCase_, UpperCamelCase_ : Dict = dummy_input['input_ids'].shape
        UpperCamelCase_ : Tuple = (batch, encoder_sequence, self._config.encoder_hidden_size)
        UpperCamelCase_ : Optional[int] = dummy_input.pop('input_ids' )
        UpperCamelCase_ : str = dummy_input.pop('attention_mask' )
        UpperCamelCase_ : Tuple = torch.zeros(_lowerCamelCase )
        return common_inputs
class _lowercase ( snake_case_ ):
    # Top-level encoder-decoder ONNX config: hands out the per-side configs.
    @property
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> None:
        """No direct inputs of its own — the encoder/decoder configs own them."""
        pass
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : PretrainedConfig ) -> OnnxConfig:
        """Return the encoder-side ONNX config.

        NOTE(review): body passes the undefined `_lowerCamelCase`; presumably
        the `encoder_config` parameter — confirm against the original.
        """
        return VisionEncoderDecoderEncoderOnnxConfig(_lowerCamelCase )
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : PretrainedConfig , snake_case : PretrainedConfig , snake_case : str = "default" ) -> OnnxConfig:
        """Return the decoder-side ONNX config, sized from the encoder.

        NOTE(review): parameters share one garbled name (SyntaxError as
        written); presumably (encoder_config, decoder_config, feature).
        """
        UpperCamelCase_ : Optional[int] = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(_lowerCamelCase , _lowerCamelCase )
| 417 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__magic_name__ : List[Any] =logging.getLogger(__name__)
class UpperCamelCase_ ( A ):
    """CoNLL-style NER token-classification task (mirrors transformers'
    `utils_ner.NER`): reads whitespace-separated token/label files.

    Fixes vs. the garbled original: duplicated parameter names (SyntaxError),
    three methods all named `__A` (only the last survived), and values bound
    to `__magic_name__` instead of the variables the code reads.
    """

    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode):
        """Read `{mode}.txt` from `data_dir` into a list of `InputExample`s.

        Sentences are separated by blank lines or `-DOCSTART-` markers; each
        non-blank line is `token [... label]`.
        """
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    # Sentence boundary: flush the accumulated tokens.
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        """Echo the test file to `writer`, appending one prediction per token."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                # Token had no prediction (sentence was truncated by the model).
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path):
        """Return the label set from `path` (one per line), or the CoNLL-2003
        NER default set; "O" is always ensured to be present."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase_ ( A ):
    """CoNLL-2003 chunking variant of the NER task: same file format, but the
    label lives in the second-to-last column and the default label set is the
    chunking tag set.

    Fixes vs. the garbled original: methods were named `__init__`/`__A` with
    values bound to `__magic_name__` instead of `labels`.
    """

    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path):
        """Return the label set from `path` (one per line), or the CoNLL-2003
        chunking default set; "O" is always ensured to be present."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class UpperCamelCase_ ( A ):
    """Universal Dependencies POS-tagging task: reads CoNLL-U files via
    `conllu.parse_incr` and uses the `upos` column as the label.

    Fixes vs. the garbled original: duplicated parameter names (SyntaxError),
    three methods all named `__A`, and values bound to `__magic_name__`
    instead of the variables the code reads.
    """

    def read_examples_from_file(self, data_dir, mode):
        """Read `{mode}.txt` (CoNLL-U format) into `InputExample`s,
        one per sentence, pairing each token's form with its UPOS tag."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        """Write `form (upos|prediction)` for every token of every sentence."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path):
        """Return the label set from `path` (one per line), or the 17-tag
        Universal Dependencies UPOS default set."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 664 | 0 |
'''simple docstring'''
# Cell prepended to generated doc notebooks (Korean docs build).
# (Fixes the garbled original: all three constants were bound to one name,
# and `INSTALL_CONTENT` was read before ever being defined.)
INSTALL_CONTENT = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'

# First cells injected into every converted notebook.
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]

# Placeholder tokens that the black formatter must not touch.
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 384 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    """Dense row-major matrix supporting +, -, * (scalar and matrix),
    transpose, and the Sherman-Morrison rank-one inverse update.

    Fixes vs. the garbled original: the class was renamed away from `Matrix`
    while its own methods (and the demo below) still construct `Matrix(...)`;
    parameters shared one name (SyntaxError); attributes/results were bound
    to `__magic_name__`; and the methods called as `self.validate_indicies`,
    `.transpose()` and `.sherman_morrison()` had garbled names.
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f'Matrix consist of {self.row} rows and {self.column} columns\n'
        # Make string identifier: widest element decides the column width.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'%{max_element_length}s'

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Return True iff `loc` is a valid (row, column) pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: "Matrix") -> "Matrix":
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add element-wise.
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> "Matrix":
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: "Matrix") -> "Matrix":
        return self + (-another)

    def __mul__(self, another) -> "Matrix":
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'Unsupported type given for another ({type(another)})'
            raise TypeError(msg)

    def transpose(self) -> "Matrix":
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: "Matrix", v: "Matrix") -> Any:
        """Return (A + u v^T)^(-1) given that `self` is A^(-1), or None when
        the update is not invertible (denominator 1 + v^T A^(-1) u == 0)."""
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demo: start from a^(-1) = I, then apply one Sherman-Morrison
        rank-one update. (Fixes the garbled original, which bound everything
        to `__magic_name__` and then read undefined `ainv`/`u`/`v`.)"""
        # a^(-1) is the identity
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'a^(-1) is {ainv}')
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'u is {u}')
        print(f'v is {v}')
        print(f'uv^T is {u * v.transpose()}')
        # Sherman Morrison
        print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}')

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 664 | 0 |
"""simple docstring"""
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
a = 'Create a default config file for Accelerate with only a few flags set.'
def lowercase (snake_case__ : Any="no" , snake_case__ : str = default_json_config_file , snake_case__ : bool = False ) -> List[str]:
'''simple docstring'''
lowerCAmelCase = Path(lowerCamelCase_ )
path.parent.mkdir(parents=lowerCamelCase_ , exist_ok=lowerCamelCase_ )
if path.exists():
print(
f'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
lowerCAmelCase = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
lowerCAmelCase = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
if torch.cuda.is_available():
lowerCAmelCase = torch.cuda.device_count()
lowerCAmelCase = num_gpus
lowerCAmelCase = False
if num_gpus > 1:
lowerCAmelCase = """MULTI_GPU"""
else:
lowerCAmelCase = """NO"""
elif is_xpu_available() and use_xpu:
lowerCAmelCase = torch.xpu.device_count()
lowerCAmelCase = num_xpus
lowerCAmelCase = False
if num_xpus > 1:
lowerCAmelCase = """MULTI_XPU"""
else:
lowerCAmelCase = """NO"""
elif is_npu_available():
lowerCAmelCase = torch.npu.device_count()
lowerCAmelCase = num_npus
lowerCAmelCase = False
if num_npus > 1:
lowerCAmelCase = """MULTI_NPU"""
else:
lowerCAmelCase = """NO"""
else:
lowerCAmelCase = 0
lowerCAmelCase = True
lowerCAmelCase = 1
lowerCAmelCase = """NO"""
lowerCAmelCase = ClusterConfig(**lowerCamelCase_ )
config.to_json_file(lowerCamelCase_ )
return path
def lowercase(parser, parents):
    """
    Register the `default` subcommand on *parser*.

    Args:
        parser: The argparse subparsers action to register the command on.
        parents: Parent parsers whose arguments the subcommand inherits.

    Returns:
        The configured subparser.
    """
    # Bug fix: the mangled original declared both parameters as `snake_case__`
    # (duplicate argument, a SyntaxError) and referenced the undefined
    # `lowerCamelCase_`; restored to coherent names.
    parser = parser.add_parser("default", parents=parents, help=a, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    # At call time the module-level name `lowercase` resolves to the last
    # definition in this file, i.e. the command handler below.
    parser.set_defaults(func=lowercase)
    return parser
def lowercase(args):
    """
    Entry point for the `default` subcommand: write the basic config file.

    Args:
        args: Parsed CLI namespace providing `mixed_precision` and
            `save_location`.
    """
    # Bug fix: the parameter was mangled to `snake_case__` while the body
    # read `args`; restored.
    # NOTE(review): `write_basic_config` is not defined under that name in
    # this fragment (the writer above is bound to `lowercase` and later
    # shadowed) — confirm the intended import/binding upstream.
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 169 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
# NOTE(review): the same mangled name `__magic_name__` is rebound three times
# below (logger, sample text, config type); in the upstream script these are
# distinct module-level names — confirm before relying on any of them.
# Also, `List[Any]` is not imported in this fragment, so the annotated
# assignments would raise NameError at import time — TODO confirm.
__magic_name__ : List[Any] =logging.getLogger(__name__)
# Sample sentence (with accented characters) used for encoding sanity checks.
__magic_name__ : int ='Hello world! cécé herlolip'
# Lightweight stand-in for the BertAbs training configuration: a named tuple
# carrying the encoder/decoder hyper-parameters expected by the checkpoints.
__magic_name__ : List[Any] =namedtuple(
    'BertAbsConfig',
    [
        'temp_dir',
        'large',
        'use_bert_emb',
        'finetune_bert',
        'encoder',
        'share_emb',
        'max_pos',
        'enc_layers',
        'enc_hidden_size',
        'enc_heads',
        'enc_ff_size',
        'enc_dropout',
        'dec_layers',
        'dec_hidden_size',
        'dec_heads',
        'dec_ff_size',
        'dec_dropout',
    ],
)
def __snake_case(bertabs_checkpoint_path, pytorch_dump_folder_path):
    """
    Convert an original BertAbs checkpoint to the transformers format.

    Loads the authors' ``AbsSummarizer`` checkpoint, copies its weights into a
    ``BertAbsSummarizer``, verifies both models produce (near-)identical
    outputs on a sample input, then saves the new model's state dict.

    Args:
        bertabs_checkpoint_path: Path to the official PyTorch dump.
        pytorch_dump_folder_path: Output folder (currently unused; the state
            dict is written to a fixed path — TODO confirm upstream).

    Raises:
        ValueError: If the converted model's outputs differ from the
            original's by more than 1e-3.
    """
    # Bug fix: the mangled original assigned every value to `__magic_name__`
    # while reading `original`, `new_model`, `tokenizer`, etc. — restored
    # coherent names per the upstream conversion script.
    # NOTE(review): the boolean config flags below follow the upstream script
    # (finetune_bert=False, large=False, share_emb=True, use_bert_emb=False);
    # the mangled source passed an undefined name for all four — confirm.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    # Load on CPU regardless of where the checkpoint was saved.
    checkpoints = torch.load(bertabs_checkpoint_path, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()
    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs (pad both sequences to max_pos=512)
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    # Bug fix: the mangled original bound the parser and parsed args to
    # `__magic_name__` but then read `parser`/`args`, and it called the
    # undefined `convert_bertabs_checkpoints`; the conversion routine in this
    # file is named `__snake_case`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--bertabs_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch dump.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the output PyTorch model.',
    )
    args = parser.parse_args()
    __snake_case(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 664 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __lowercase ( unittest.TestCase ):
    # Builds a small random Albert config and input tensors for the Flax tests
    # below.
    # NOTE(review): identifiers look machine-mangled — every parameter of
    # __init__ is named `snake_case` (a duplicate-argument SyntaxError) while
    # the body reads `parent`, `batch_size`, etc.; confirm against the
    # upstream FlaxAlbertModelTester before relying on this class.
    def __init__(self : Optional[Any] , snake_case : int , snake_case : Tuple=13 , snake_case : Optional[Any]=7 , snake_case : Dict=True , snake_case : Union[str, Any]=True , snake_case : Optional[Any]=True , snake_case : Dict=True , snake_case : Any=99 , snake_case : Tuple=32 , snake_case : Optional[Any]=5 , snake_case : Optional[Any]=4 , snake_case : Union[str, Any]=37 , snake_case : List[str]="gelu" , snake_case : Union[str, Any]=0.1 , snake_case : List[Any]=0.1 , snake_case : int=512 , snake_case : List[str]=16 , snake_case : int=2 , snake_case : Optional[Any]=0.02 , snake_case : Optional[Any]=4 , ) -> Any:
        # Store every hyper-parameter on the tester instance.
        _lowercase : List[Any] = parent
        _lowercase : int = batch_size
        _lowercase : List[Any] = seq_length
        _lowercase : List[Any] = is_training
        _lowercase : Any = use_attention_mask
        _lowercase : Tuple = use_token_type_ids
        _lowercase : Union[str, Any] = use_labels
        _lowercase : Optional[Any] = vocab_size
        _lowercase : Tuple = hidden_size
        _lowercase : List[Any] = num_hidden_layers
        _lowercase : int = num_attention_heads
        _lowercase : Optional[int] = intermediate_size
        _lowercase : str = hidden_act
        _lowercase : Union[str, Any] = hidden_dropout_prob
        _lowercase : Dict = attention_probs_dropout_prob
        _lowercase : Tuple = max_position_embeddings
        _lowercase : Union[str, Any] = type_vocab_size
        _lowercase : Optional[Any] = type_sequence_label_size
        _lowercase : Any = initializer_range
        _lowercase : Dict = num_choices

    def _a(self : Dict ) -> Any:
        # Draw random ids/masks and build the matching AlbertConfig.
        _lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowercase : Dict = None
        if self.use_attention_mask:
            _lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
        _lowercase : Optional[int] = None
        if self.use_token_type_ids:
            _lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowercase : List[Any] = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def _a(self : Dict ) -> Union[str, Any]:
        # Repackage prepared inputs as the kwargs dict the common tests expect.
        _lowercase : Union[str, Any] = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase , _lowercase : Tuple = config_and_inputs
        _lowercase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class __lowercase ( __snake_case , unittest.TestCase ):
    # Common-mixin test suite over all Flax Albert head classes.
    # NOTE(review): the base-class name `__snake_case` and the helper
    # `FlaxAlbertModelTester` are not defined under those names in this
    # fragment (mangled identifiers), and FlaxAlbertForQuestionAnswering
    # appears twice in the tuple — confirm against the upstream test module.
    _A = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _a(self : int ) -> int:
        # Instantiate the shared model tester used by the mixin's tests.
        _lowercase : List[Any] = FlaxAlbertModelTester(self )

    @slow
    def _a(self : Tuple ) -> Optional[int]:
        # Smoke test: each class loads pretrained weights and runs a forward pass.
        for model_class_name in self.all_model_classes:
            _lowercase : List[str] = model_class_name.from_pretrained("albert-base-v2" )
            _lowercase : Any = model(np.ones((1, 1) ) )
            self.assertIsNotNone(_lowerCamelCase )
@require_flax
class __lowercase ( unittest.TestCase ):
    # Integration test pinning the first hidden states of albert-base-v2.
    @slow
    def _a(self : Union[str, Any] ) -> Union[str, Any]:
        _lowercase : Dict = FlaxAlbertModel.from_pretrained("albert-base-v2" )
        _lowercase : str = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        _lowercase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        _lowercase : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
        # Expected output shape is (batch=1, seq_len=11, hidden=768).
        _lowercase : int = (1, 11, 768)
        self.assertEqual(output.shape , _lowerCamelCase )
        # Golden slice of the hidden states; tolerance 1e-4.
        _lowercase : Dict = np.array(
            [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1e-4 ) )
| 461 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
    """simple docstring"""

    # Unit tests for transformers.generation.DisjunctiveConstraint.
    # NOTE(review): `_lowerCamelCase` is used throughout but never defined in
    # this fragment (machine-mangled identifiers); in the upstream test these
    # are the concrete values/exception types being asserted on — confirm.
    def __A ( self : List[str] ) -> str:
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        __magic_name__ = [[1, 2, 4], [1, 2, 3, 4]]
        __magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
        self.assertTrue(isinstance(dc.token_ids , _lowerCamelCase ) )
        with self.assertRaises(_lowerCamelCase ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(_lowerCamelCase ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def __A ( self : List[Any] ) -> str:
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        __magic_name__ = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(_lowerCamelCase ):
            DisjunctiveConstraint(_lowerCamelCase )  # fails here

    def __A ( self : List[Any] ) -> int:
        # Stepping 1 -> 2 -> 3 should complete the [1, 2, 3] branch.
        __magic_name__ = [[1, 2, 3], [1, 2, 4]]
        __magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
        __magic_name__ = stepped is True and completed is False and reset is False
        self.assertTrue(_lowerCamelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
        __magic_name__ = stepped is True and completed is False and reset is False
        self.assertTrue(_lowerCamelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(3 )
        __magic_name__ = stepped is True and completed is True and reset is False
        self.assertTrue(_lowerCamelCase )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def __A ( self : Any ) -> Union[str, Any]:
        # Completion via the [1, 2, 4, 5] branch, then reset and complete
        # via the shorter [1, 2, 5] branch; also checks remaining() counts.
        __magic_name__ = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        __magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 664 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
    # Builds random LiLT configs/inputs and per-head checks for the tests
    # below (upstream: LiltModelTester).
    # NOTE(review): `_lowerCamelCase` and the unpacked result names
    # (`bbox`, `config_and_inputs`, ...) are not bound in this fragment —
    # machine-mangled identifiers; confirm against the upstream test module.
    def __init__( self :Dict , lowercase :Any , lowercase :Union[str, Any]=1_3 , lowercase :Dict=7 , lowercase :str=True , lowercase :str=True , lowercase :Dict=True , lowercase :Tuple=True , lowercase :Dict=9_9 , lowercase :int=2_4 , lowercase :Optional[int]=2 , lowercase :Tuple=6 , lowercase :Any=3_7 , lowercase :Optional[int]="gelu" , lowercase :Dict=0.1 , lowercase :List[Any]=0.1 , lowercase :str=5_1_2 , lowercase :int=1_6 , lowercase :str=2 , lowercase :Tuple=0.02 , lowercase :Tuple=3 , lowercase :List[Any]=None , lowercase :str=1_0_0_0 , ) -> Dict:
        """simple docstring"""
        # Store every hyper-parameter on the tester instance.
        SCREAMING_SNAKE_CASE = parent
        SCREAMING_SNAKE_CASE = batch_size
        SCREAMING_SNAKE_CASE = seq_length
        SCREAMING_SNAKE_CASE = is_training
        SCREAMING_SNAKE_CASE = use_input_mask
        SCREAMING_SNAKE_CASE = use_token_type_ids
        SCREAMING_SNAKE_CASE = use_labels
        SCREAMING_SNAKE_CASE = vocab_size
        SCREAMING_SNAKE_CASE = hidden_size
        SCREAMING_SNAKE_CASE = num_hidden_layers
        SCREAMING_SNAKE_CASE = num_attention_heads
        SCREAMING_SNAKE_CASE = intermediate_size
        SCREAMING_SNAKE_CASE = hidden_act
        SCREAMING_SNAKE_CASE = hidden_dropout_prob
        SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE = max_position_embeddings
        SCREAMING_SNAKE_CASE = type_vocab_size
        SCREAMING_SNAKE_CASE = type_sequence_label_size
        SCREAMING_SNAKE_CASE = initializer_range
        SCREAMING_SNAKE_CASE = num_labels
        SCREAMING_SNAKE_CASE = scope
        SCREAMING_SNAKE_CASE = range_bbox

    def snake_case__ ( self :List[str] ) -> Tuple:
        """simple docstring"""
        SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    SCREAMING_SNAKE_CASE = bbox[i, j, 3]
                    SCREAMING_SNAKE_CASE = bbox[i, j, 1]
                    SCREAMING_SNAKE_CASE = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    SCREAMING_SNAKE_CASE = bbox[i, j, 2]
                    SCREAMING_SNAKE_CASE = bbox[i, j, 0]
                    SCREAMING_SNAKE_CASE = t
        SCREAMING_SNAKE_CASE = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        SCREAMING_SNAKE_CASE = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE = None
        SCREAMING_SNAKE_CASE = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        SCREAMING_SNAKE_CASE = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def snake_case__ ( self :Optional[Any] ) -> List[Any]:
        """simple docstring"""
        # Build the LiltConfig matching the stored hyper-parameters.
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    def snake_case__ ( self :List[Any] , lowercase :List[Any] , lowercase :Any , lowercase :int , lowercase :Optional[Any] , lowercase :Optional[Any] , lowercase :Dict , lowercase :Optional[int] , ) -> Dict:
        """simple docstring"""
        # Base-model check: forward with/without masks and verify output shapes.
        SCREAMING_SNAKE_CASE = LiltModel(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        SCREAMING_SNAKE_CASE = model(_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
        SCREAMING_SNAKE_CASE = model(_lowerCamelCase , bbox=_lowerCamelCase , token_type_ids=_lowerCamelCase )
        SCREAMING_SNAKE_CASE = model(_lowerCamelCase , bbox=_lowerCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def snake_case__ ( self :str , lowercase :Any , lowercase :int , lowercase :List[Any] , lowercase :Any , lowercase :str , lowercase :Optional[int] , lowercase :List[Any] , ) -> List[Any]:
        """simple docstring"""
        # Token-classification head check: logits shaped (batch, seq, labels).
        SCREAMING_SNAKE_CASE = self.num_labels
        SCREAMING_SNAKE_CASE = LiltForTokenClassification(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        SCREAMING_SNAKE_CASE = model(
            _lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def snake_case__ ( self :Any , lowercase :Any , lowercase :str , lowercase :Any , lowercase :Tuple , lowercase :Any , lowercase :Optional[int] , lowercase :Tuple , ) -> Any:
        """simple docstring"""
        # Question-answering head check: start/end logits shaped (batch, seq).
        SCREAMING_SNAKE_CASE = LiltForQuestionAnswering(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        SCREAMING_SNAKE_CASE = model(
            _lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def snake_case__ ( self :Union[str, Any] ) -> int:
        """simple docstring"""
        # Repackage prepared inputs as the kwargs dict the common tests expect.
        SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) ,
        ) = config_and_inputs
        SCREAMING_SNAKE_CASE = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    # Common-mixin test suite for the LiLT model/head classes.
    # NOTE(review): the base-class name `__lowerCamelCase` and the helper
    # `LiltModelTester` are not defined under those names in this fragment
    # (mangled identifiers) — confirm against the upstream test module.
    UpperCamelCase_ : Optional[Any] = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    UpperCamelCase_ : Optional[Any] = (
        {
            '''feature-extraction''': LiltModel,
            '''question-answering''': LiltForQuestionAnswering,
            '''text-classification''': LiltForSequenceClassification,
            '''token-classification''': LiltForTokenClassification,
            '''zero-shot''': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCamelCase_ : Optional[Any] = False
    UpperCamelCase_ : Dict = False

    def snake_case__ ( self :str , lowercase :Union[str, Any] , lowercase :Any , lowercase :Any , lowercase :Tuple , lowercase :List[Any] ) -> int:
        """simple docstring"""
        # Pipeline-test filter hook: never skip any pipeline test case here.
        return True

    def snake_case__ ( self :List[Any] ) -> List[Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE = LiltModelTester(self )
        SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=3_7 )

    def snake_case__ ( self :Dict ) -> Optional[Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def snake_case__ ( self :str ) -> Dict:
        """simple docstring"""
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCamelCase )

    def snake_case__ ( self :List[str] ) -> Optional[int]:
        """simple docstring"""
        # Re-run the base-model check under every position-embedding variant.
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            SCREAMING_SNAKE_CASE = type
            self.model_tester.create_and_check_model(*_lowerCamelCase )

    def snake_case__ ( self :List[str] ) -> str:
        """simple docstring"""
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )

    def snake_case__ ( self :Optional[Any] ) -> Optional[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )

    @slow
    def snake_case__ ( self :List[str] ) -> Optional[Any]:
        """simple docstring"""
        # Smoke test: the first archived checkpoint loads successfully.
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE = LiltModel.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )
@require_torch
@slow
class lowerCamelCase ( unittest.TestCase ):
    # Integration test pinning LiLT hidden states on a tiny hand-made input.
    def snake_case__ ( self :Any ) -> str:
        """simple docstring"""
        SCREAMING_SNAKE_CASE = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(_lowerCamelCase )
        SCREAMING_SNAKE_CASE = torch.tensor([[1, 2]] , device=_lowerCamelCase )
        SCREAMING_SNAKE_CASE = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_lowerCamelCase )
        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE = model(input_ids=_lowerCamelCase , bbox=_lowerCamelCase )
        # Expected shape (1, 2, 768) and golden slice with tolerance 1e-3.
        SCREAMING_SNAKE_CASE = torch.Size([1, 2, 7_6_8] )
        SCREAMING_SNAKE_CASE = torch.tensor(
            [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=_lowerCamelCase , )
        self.assertTrue(outputs.last_hidden_state.shape , _lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _lowerCamelCase , atol=1e-3 ) )
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
# Non-default value for (nearly) every common PretrainedConfig kwarg; the
# config tests below use this to verify no common kwarg silently keeps its
# default. NOTE(review): the name is mangled to `__magic_name__` here but the
# tests reference `config_common_kwargs` — confirm the intended binding.
__magic_name__ : Dict ={
    'return_dict': False,
    'output_hidden_states': True,
    'output_attentions': True,
    'torchscript': True,
    'torch_dtype': 'float16',
    'use_bfloat16': True,
    'tf_legacy_loss': True,
    'pruned_heads': {'a': 1},
    'tie_word_embeddings': False,
    'is_decoder': True,
    'cross_attention_hidden_size': 1_28,
    'add_cross_attention': True,
    'tie_encoder_decoder': True,
    'max_length': 50,
    'min_length': 3,
    'do_sample': True,
    'early_stopping': True,
    'num_beams': 3,
    'num_beam_groups': 3,
    'diversity_penalty': 0.5,
    'temperature': 2.0,
    'top_k': 10,
    'top_p': 0.7,
    'typical_p': 0.2,
    'repetition_penalty': 0.8,
    'length_penalty': 0.8,
    'no_repeat_ngram_size': 5,
    'encoder_no_repeat_ngram_size': 5,
    'bad_words_ids': [1, 2, 3],
    'num_return_sequences': 3,
    'chunk_size_feed_forward': 5,
    'output_scores': True,
    'return_dict_in_generate': True,
    'forced_bos_token_id': 2,
    'forced_eos_token_id': 3,
    'remove_invalid_values': True,
    'architectures': ['BertModel'],
    'finetuning_task': 'translation',
    'id2label': {0: 'label'},
    'label2id': {'label': '0'},
    'tokenizer_class': 'BertTokenizerFast',
    'prefix': 'prefix',
    'bos_token_id': 6,
    'pad_token_id': 7,
    'eos_token_id': 8,
    'sep_token_id': 9,
    'decoder_start_token_id': 10,
    'exponential_decay_length_penalty': (5, 1.0_1),
    'suppress_tokens': [0, 1],
    'begin_suppress_tokens': 2,
    'task_specific_params': {'translation': 'some_params'},
    'problem_type': 'regression',
}
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
    """simple docstring"""

    # Hub round-trip tests: push configs to the staging Hub and reload them.
    # NOTE(review): `_lowerCamelCase` is used throughout but never defined in
    # this fragment (mangled identifiers) — confirm against the upstream test.
    @classmethod
    def __A ( cls : Any ) -> Union[str, Any]:
        # Authenticate against the staging Hub once for the whole class.
        __magic_name__ = TOKEN
        HfFolder.save_token(_lowerCamelCase )

    @classmethod
    def __A ( cls : Any ) -> Tuple:
        # Best-effort cleanup of repos the tests may have created.
        try:
            delete_repo(token=cls._token , repo_id="test-config" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-config" )
        except HTTPError:
            pass

    def __A ( self : Optional[Any] ) -> Dict:
        # Push under the user namespace, reload, and compare every field.
        __magic_name__ = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("test-config" , use_auth_token=self._token )
        __magic_name__ = BertConfig.from_pretrained(f'{USER}/test-config' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-config" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(_lowerCamelCase , repo_id="test-config" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
            __magic_name__ = BertConfig.from_pretrained(f'{USER}/test-config' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )

    def __A ( self : str ) -> Optional[int]:
        # Same round-trip, but under an organization namespace.
        __magic_name__ = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
        __magic_name__ = BertConfig.from_pretrained("valid_org/test-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                _lowerCamelCase , repo_id="valid_org/test-config-org" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
        __magic_name__ = BertConfig.from_pretrained("valid_org/test-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )

    def __A ( self : Optional[int] ) -> Union[str, Any]:
        # Dynamic (trust_remote_code) config: push a custom class and reload it.
        CustomConfig.register_for_auto_class()
        __magic_name__ = CustomConfig(attribute=42 )
        config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
        __magic_name__ = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config' , trust_remote_code=_lowerCamelCase )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
        self.assertEqual(new_config.attribute , 42 )
class UpperCamelCase_ ( unittest.TestCase ):
    """simple docstring"""

    # Local (non-Hub) config behaviors: update_from_string, common-kwargs
    # completeness, subfolder loading, offline fallback, versioned files.
    # NOTE(review): `_lowerCamelCase` is used throughout but never defined in
    # this fragment (mangled identifiers) — confirm against the upstream test.
    def __A ( self : Optional[int] ) -> Optional[Any]:
        __magic_name__ = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        __magic_name__ = c.n_embd + 1  # int
        __magic_name__ = c.resid_pdrop + 1.0  # float
        __magic_name__ = not c.scale_attn_weights  # bool
        __magic_name__ = c.summary_type + "foo"  # str
        c.update_from_string(
            f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
        self.assertEqual(_lowerCamelCase , c.n_embd , "mismatch for key: n_embd" )
        self.assertEqual(_lowerCamelCase , c.resid_pdrop , "mismatch for key: resid_pdrop" )
        self.assertEqual(_lowerCamelCase , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
        self.assertEqual(_lowerCamelCase , c.summary_type , "mismatch for key: summary_type" )

    def __A ( self : List[Any] ) -> Union[str, Any]:
        # Every common PretrainedConfig kwarg must appear (with a non-default
        # value) in config_common_kwargs, except the whitelisted attributes.
        __magic_name__ = PretrainedConfig()
        __magic_name__ = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to addin config_common_kwargs above.
        self.assertListEqual(
            _lowerCamelCase , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
        __magic_name__ = [key for key, value in config_common_kwargs.items() if value == getattr(_lowerCamelCase , _lowerCamelCase )]
        if len(_lowerCamelCase ) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f' {", ".join(_lowerCamelCase )}.' )

    def __A ( self : List[Any] ) -> List[Any]:
        with self.assertRaises(_lowerCamelCase ):
            # config is in subfolder, the following should not work without specifying the subfolder
            __magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
        __magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
        self.assertIsNotNone(_lowerCamelCase )

    def __A ( self : Tuple ) -> int:
        # A mock response for an HTTP head request to emulate server down
        __magic_name__ = mock.Mock()
        __magic_name__ = 5_00
        __magic_name__ = {}
        __magic_name__ = HTTPError
        __magic_name__ = {}
        # Download this model to make sure it's in the cache.
        __magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=_lowerCamelCase ) as mock_head:
            __magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()

    def __A ( self : Union[str, Any] ) -> Dict:
        # This test is for deprecated behavior and can be removed in v5
        __magic_name__ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )

    def __A ( self : Dict ) -> Optional[int]:
        # Version-gated config files: newer-versioned file wins when the
        # installed transformers version is high enough, otherwise falls back.
        __magic_name__ = AutoConfig.from_pretrained("bert-base-cased" )
        __magic_name__ = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(_lowerCamelCase )
            __magic_name__ = 2
            json.dump(configuration.to_dict() , open(os.path.join(_lowerCamelCase , "config.4.0.0.json" ) , "w" ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            __magic_name__ = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            __magic_name__ = ["config.42.0.0.json"]
            __magic_name__ = 7_68
            configuration.save_pretrained(_lowerCamelCase )
            shutil.move(os.path.join(_lowerCamelCase , "config.4.0.0.json" ) , os.path.join(_lowerCamelCase , "config.42.0.0.json" ) )
            __magic_name__ = AutoConfig.from_pretrained(_lowerCamelCase )
            self.assertEqual(new_configuration.hidden_size , 7_68 )
def __A ( self : Optional[int] ) -> str:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__magic_name__ = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
__magic_name__ = "v4.0.0"
__magic_name__ , __magic_name__ = new_transformers.models.auto.AutoConfig.from_pretrained(
_lowerCamelCase , return_unused_kwargs=_lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(_lowerCamelCase , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__magic_name__ = "v3.0.0"
__magic_name__ = old_transformers.models.auto.AutoConfig.from_pretrained(_lowerCamelCase )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 664 | 0 |
"""Simple linear classification head on top of transformer hidden states."""
from torch import nn


class UpperCamelCase__(nn.Module):
    """Maps hidden states of size `embed_size` to `class_size` logits with one linear layer."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
| 50 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    """Holds the parameters used to build a `DPTImageProcessor` under test."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # The list defaults are never mutated, so sharing them across instances is harmless.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class UpperCamelCase_(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `DPTImageProcessor` over PIL, numpy and torch inputs."""

    # Attribute name expected by the saving mixin.
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self) -> None:
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self) -> None:
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self) -> None:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def _check_encoded_shapes(self, image_processing, image_inputs) -> None:
        """Shared assertions: encoding one image and a batch yields the expected pixel_values shapes."""
        tester = self.image_processor_tester

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                tester.num_channels,
                tester.size["height"],
                tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                tester.batch_size,
                tester.num_channels,
                tester.size["height"],
                tester.size["width"],
            ),
        )

    def test_call_pil(self) -> None:
        # Initialize image_processing with random PIL images
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        self._check_encoded_shapes(image_processing, image_inputs)

    def test_call_numpy(self) -> None:
        # Initialize image_processing with random numpy tensors
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        self._check_encoded_shapes(image_processing, image_inputs)

    def test_call_pytorch(self) -> None:
        # Initialize image_processing with random PyTorch tensors
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        self._check_encoded_shapes(image_processing, image_inputs)
| 664 | 0 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Regex matching any character that is not part of an identifier-like token.
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens):
    """Return a MinHash of *tokens*, or None when there are fewer than MIN_NUM_TOKENS."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code):
    """Split *code* on non-identifier characters and return the set of non-empty tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    """LSH index that groups near-duplicate code files into clusters."""

    def __init__(self, *, duplication_jaccard_threshold: float = 0.85) -> None:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # Maps a cluster's base element to the set of its duplicates.
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key, min_hash) -> None:
        """Insert *code_key* with its *min_hash*, attaching it to an existing cluster when close."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # No close duplicate is a cluster base yet: root a new cluster at the first one.
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """Return clusters as lists of {base_index, repo_name, path} dicts (base element included)."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """Dump the duplicate clusters to *filepath* as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """Return ((index, repo_name, path), MinHash) for a dataset row, or None when the file is too short."""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    """Yield (key, MinHash) pairs for *dataset_iterator*, computed in a process pool."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    """Build near-duplicate clusters for *dataset_iterator* at the given Jaccard threshold."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1, code2):
    """Jaccard similarity between the token sets of two code strings."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
# Shared with worker processes by `find_extremes` (rebound there via `global`).
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce *cluster* to its extremes: elements that are pairwise below *jaccard_threshold*.

    Each kept element gets a "copies" count of the near-duplicates it absorbed.
    Reads the module-global `_shared_dataset` set by `find_extremes`.
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            # Not similar to any extreme found so far: keep it as a new extreme.
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run `_find_cluster_extremes_shared` over *cluster_list* using a process pool.

    *dataset* is exposed to workers through the module-global `_shared_dataset`
    so it is not pickled per task.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    reduce_fn = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(reduce_fn, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    """Remove near-duplicate rows from *dataset*, keeping one "extreme" per cluster.

    Returns the filtered dataset and the duplicate clusters annotated with
    "is_extreme" and "copies" per element.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element

    # Drop every duplicate that is not one of the kept extremes.
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
| 120 |
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    """Fully-connected network: input -> 4 sigmoid nodes -> 3 sigmoid nodes -> 1 output node."""

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where the first dimension is the
        # number of nodes in the previous layer and the second is the number of
        # nodes in the next layer.
        # input layer -> first hidden layer (4 nodes)
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(self.input_array.shape[1], 4)
        # first hidden layer (4 nodes) -> second hidden layer (3 nodes)
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # second hidden layer (3 nodes) -> output layer (1 node)
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values, initially all zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate the stored input through the network; returns and caches layer activations."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """Update all three weight matrices by gradient descent on the squared error."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Run *iterations* feedforward/backpropagation steps; optionally print the loss."""
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Classify *input_arr* as 0 or 1 (threshold 0.6 on the output activation)."""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic sigmoid, applied elementwise."""
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of the sigmoid's output *value*."""
    return (value) * (1 - (value))
def example() -> int:
    """Train the demo network on 3-bit inputs and classify the input (1, 1, 1)."""
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
| 664 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__(ProcessorMixin):
    """Wraps a BLIP image processor and a BERT tokenizer into a single processor."""

    # ProcessorMixin contract: the managed sub-processors and their classes.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # BLIP's text encoder does not use token type ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare text and/or images for the model, forwarding tokenizer kwargs unchanged."""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Preserve order while removing duplicates between the two sub-processors.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
'''simple docstring'''
import torch
from transformers import AutoModel
class UpperCamelCase_(torch.nn.Module):
    """Few-shot NER model: a BERT encoder plus cosine-similarity attention over support examples."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        """Encode tokenized inputs and return the last hidden state."""
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        """Sum token embeddings along axis 2, keeping the summed dimension."""
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q, S, T=1):
        """Temperature-scaled softmax over cosine similarities between *q* and *S*."""
        return self.softmax(T * self.cos(q, S))

    def forward(self, W_query, W_supports):
        """Return per-query start/end probability distributions over support tokens.

        NOTE(review): mutates W_supports in place (deletes the bookkeeping keys) —
        confirm callers do not reuse the dict afterwards.
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            # Offset of this query's support block within the stacked supports.
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 664 | 0 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image

else:
    # Stub so the module imports when PIL is unavailable; vision tests are skipped anyway.
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class UpperCamelCase__(unittest.TestCase):
    """Smoke and integration tests for the zero-shot image-classification pipeline."""

    @require_torch
    def test_small_model_pt(self) -> None:
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # 5 identical inputs -> 5 triplets of near-equal scores with unstable label order.
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ]
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self) -> None:
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ]
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self) -> None:
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self) -> None:
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 311 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
# Emits a FutureWarning pointing users at the new import location; the old import
# path keeps working until the 0.22.0 removal noted below.
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
| 664 | 0 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCamelCase__(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Tests for the `VQModel` autoencoder."""

    # Attribute names expected by ModelTesterMixin.
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        # NOTE: the extra default argument on a property mirrors the mixin's expectations;
        # only the default is ever used.
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """Return (init kwargs, forward inputs) for the common mixin tests."""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        # Covered elsewhere; intentionally skipped for VQModel.
        pass

    def test_training(self):
        # Covered elsewhere; intentionally skipped for VQModel.
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1E-3))
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def __snake_case ( lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ):
    '''Convert a T5X ("tax") checkpoint into a Flax seq2seq model and save it.

    NOTE(review): this function was damaged by an automated rename and cannot run:
    - all three parameters share the name `lowerCamelCase_` (a SyntaxError); they
      were presumably checkpoint path, config name, and output folder — TODO confirm
      against the upstream `convert_t5x_checkpoint_to_flax.py` script;
    - every assignment target was rewritten to `__magic_name__`, so the names read
      later (`tax_attention_key`, `flax_model`, `split_mlp_wi`, the
      `flax_model_*_layer_block` parameter paths, …) are never defined.
    Only comments are added here; restoring the code needs the upstream script.
    '''
    __magic_name__ = AutoConfig.from_pretrained(lowerCamelCase_ )
    __magic_name__ = FlaxAutoModelForSeqaSeqLM.from_config(config=lowerCamelCase_ )
    __magic_name__ = checkpoints.load_tax_checkpoint(lowerCamelCase_ )
    # Presence of "wi_0" distinguishes gated-MLP (v1.1 / LongT5) checkpoints.
    __magic_name__ = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
    if config.model_type == "t5":
        __magic_name__ = "SelfAttention"
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        __magic_name__ = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        __magic_name__ = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]." )
    # Encoder
    for layer_index in range(config.num_layers ):
        __magic_name__ = F'layers_{str(lowerCamelCase_ )}'
        # Self-Attention
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
        # Layer Normalization
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
        if split_mlp_wi:
            __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        __magic_name__ = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        __magic_name__ = flax_model.params["encoder"]["block"][str(lowerCamelCase_ )]["layer"]
        __magic_name__ = tax_attention_key
        __magic_name__ = tax_attention_out
        __magic_name__ = tax_attention_query
        __magic_name__ = tax_attention_value
        __magic_name__ = tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            __magic_name__ = tax_global_layer_norm
        if split_mlp_wi:
            __magic_name__ = tax_mlp_wi_a
            __magic_name__ = tax_mlp_wi_a
        else:
            __magic_name__ = tax_mlp_wi
        __magic_name__ = tax_mlp_wo
        __magic_name__ = tax_mlp_layer_norm
        __magic_name__ = flax_model_encoder_layer_block
    # Only for layer 0:
    __magic_name__ = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    __magic_name__ = tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        __magic_name__ = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        __magic_name__ = tax_encoder_global_rel_embedding
    # Assigning
    __magic_name__ = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    __magic_name__ = tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers ):
        __magic_name__ = F'layers_{str(lowerCamelCase_ )}'
        # Self-Attention
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
        # Layer Normalization
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]
        # Encoder-Decoder-Attention
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        __magic_name__ = tax_enc_dec_attention_module["key"]["kernel"]
        __magic_name__ = tax_enc_dec_attention_module["out"]["kernel"]
        __magic_name__ = tax_enc_dec_attention_module["query"]["kernel"]
        __magic_name__ = tax_enc_dec_attention_module["value"]["kernel"]
        # Layer Normalization
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
        # MLP
        if split_mlp_wi:
            __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        __magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        __magic_name__ = flax_model.params["decoder"]["block"][str(lowerCamelCase_ )]["layer"]
        __magic_name__ = tax_attention_key
        __magic_name__ = tax_attention_out
        __magic_name__ = tax_attention_query
        __magic_name__ = tax_attention_value
        __magic_name__ = tax_pre_attention_layer_norm
        __magic_name__ = tax_enc_dec_attention_key
        __magic_name__ = tax_enc_dec_attention_out
        __magic_name__ = tax_enc_dec_attention_query
        __magic_name__ = tax_enc_dec_attention_value
        __magic_name__ = tax_cross_layer_norm
        if split_mlp_wi:
            __magic_name__ = tax_mlp_wi_a
            __magic_name__ = tax_mlp_wi_a
        else:
            __magic_name__ = tax_mlp_wi
        __magic_name__ = tax_mlp_wo
        __magic_name__ = txa_mlp_layer_norm
        __magic_name__ = flax_model_decoder_layer_block
    # Decoder Normalization
    __magic_name__ = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    __magic_name__ = txa_decoder_norm
    # Only for layer 0:
    __magic_name__ = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    __magic_name__ = tax_decoder_rel_embedding
    # Token Embeddings
    __magic_name__ = tax_model["target"]["token_embedder"]["embedding"]
    __magic_name__ = txa_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        __magic_name__ = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(lowerCamelCase_ )
    print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the converter.
    # Fixes: `parser` and `args` were never bound (results went to `__magic_name__`),
    # the converter was called under a name (`convert_tax_checkpoint_to_flax`) that
    # does not exist in this file — it is defined above as `__snake_case` — and the
    # parsed attribute is `t5x_checkpoint_path`, not `tax_checkpoint_path`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
    )
    parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
    parser.add_argument(
        '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
    )
    args = parser.parse_args()
    __snake_case(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 664 | 0 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class SCREAMING_SNAKE_CASE__ ( TestCase ):
    """Tests for `FeaturesManager.determine_framework` (PyTorch vs. TensorFlow selection).

    Fixes applied: the base class was the undefined name `UpperCamelCase_` (now the
    imported `TestCase`), the five methods all shared the name `_lowercase` (so they
    shadowed each other and `self._setup_pt_ckpt` / `self._setup_tf_ckpt` could not
    resolve), and the undefined `_lowerCamelCase` placeholders were rebound.
    """

    def setUp(self ):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self , save_dir ):
        """Save a small PyTorch checkpoint into `save_dir`."""
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(save_dir )

    def _setup_tf_ckpt(self , save_dir ):
        """Save a small TensorFlow checkpoint into `save_dir`."""
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(save_dir )

    def test_framework_provided(self ):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )

    def test_checkpoint_provided(self ):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            # NOTE(review): expected exception type reconstructed — confirm against
            # `transformers.onnx.features.FeaturesManager.determine_framework`.
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )

    def test_framework_from_environment(self ):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch("transformers.onnx.features.is_tf_available" , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch("transformers.onnx.features.is_torch_available" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch("transformers.onnx.features.is_tf_available" , mock_tf_available ), patch(
            "transformers.onnx.features.is_torch_available" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch("transformers.onnx.features.is_tf_available" , mock_tf_available ), patch(
            "transformers.onnx.features.is_torch_available" , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
| 297 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCamelCase_ ( unittest.TestCase , ToolTesterMixin ):
    """Tests for the SpeechT5-based `text-to-speech` tool.

    Fixes applied: the mixin base was the undefined name `A` (now the imported
    `ToolTesterMixin`), `self.tool` was never assigned before `self.tool.setup()`,
    and the three methods all shared the name `__A` (shadowing each other).
    """

    def setUp(self ):
        super().setUp()
        self.tool = load_tool("text-to-speech" )
        self.tool.setup()

    def test_exact_match_arg(self ):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool("hey" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )

    def test_exact_match_kwarg(self ):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        # NOTE(review): upstream passes the prompt by keyword here; confirm the
        # tool's parameter name is `text`.
        result = self.tool(text="hey" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 664 | 0 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __lowercase ( *lowerCamelCase : List[str] ):
    """Release references to the given objects and empty the accelerator cache.

    Returns the same number of entries, each replaced by ``None``, so callers can
    rebind their variables, e.g. ``a, b = __lowercase(a, b)``.

    Fixes applied: the body referenced the undefined names ``lowerCamelCase_`` and
    ``UpperCamelCase_`` and never actually nulled the list elements.
    """
    objects = list(lowerCamelCase )
    for i in range(len(objects ) ):
        # Drop our reference so the underlying (accelerator) memory can be reclaimed.
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def __lowercase ( lowerCamelCase : Exception ):
    """Return True if *lowerCamelCase* is a single-argument RuntimeError whose message
    indicates an out-of-memory condition (CUDA / cuDNN / CPU allocator).

    Fixes applied: the statement list and the exception were referenced through the
    undefined names ``_statements`` / ``exception``, and the ``isinstance`` check
    had undefined arguments (restored to ``RuntimeError`` per upstream accelerate).
    """
    _statements = [
        'CUDA out of memory.', # CUDA OOM
        'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.', # CUDNN SNAFU
        'DefaultCPUAllocator: can\'t allocate memory', # CPU OOM
    ]
    if isinstance(lowerCamelCase , RuntimeError ) and len(lowerCamelCase.args ) == 1:
        return any(err in lowerCamelCase.args[0] for err in _statements )
    return False


# Backward-compatible alias under the intended upstream name; the batch-size
# decorator below calls `should_reduce_batch_size`, which was otherwise undefined.
should_reduce_batch_size = __lowercase
def __lowercase ( function : callable = None , starting_batch_size : int = 128 ):
    """Decorator that retries *function* with a halved batch size after each OOM error.

    The wrapped function must accept ``batch_size`` as its first parameter; the
    decorator supplies it, starting at *starting_batch_size* and halving whenever
    `should_reduce_batch_size` recognises the raised exception. Usable bare or
    with arguments (``@__lowercase(starting_batch_size=64)``).

    Fixes applied: the original signature repeated the parameter name
    ``lowerCamelCase`` twice (a SyntaxError, as did the inner ``*args``/``**kwargs``),
    ``nonlocal batch_size`` had no enclosing binding, and ``params``/``arg_str``
    were never assigned.
    """
    if function is None:
        # Called with configuration only: return a partial awaiting the function.
        return functools.partial(__lowercase , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size

    def decorator(*args , **kwargs ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error: `batch_size` is injected by the decorator.
        if len(params ) < (len(args ) + 1):
            arg_str = ', '.join([F"{arg}={value}" for arg, value in zip(params[1:] , args[1:] )] )
            raise TypeError(
                F"Batch size was passed into `{function.__name__}` as the first argument when called."
                F"Remove this as the decorator already does so: `{function.__name__}({arg_str})`" )
        while True:
            if batch_size == 0:
                raise RuntimeError('No executable batch size found, reached zero.' )
            try:
                return function(batch_size , *args , **kwargs )
            except Exception as e:
                if should_reduce_batch_size(e ):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator


# Backward-compatible alias matching the upstream accelerate name.
find_executable_batch_size = __lowercase
| 417 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Splitter used to tokenize file contents; restored from the clobbered
# `__magic_name__` binding — `NON_ALPHA`, `MIN_NUM_TOKENS` and `NUM_PERM` are all
# read by the functions below.
NON_ALPHA = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def __snake_case ( lowerCamelCase_ : List[str] ):
    '''Compute a MinHash sketch over the given list of tokens.

    Returns ``None`` when there are fewer than ``MIN_NUM_TOKENS`` tokens so very
    short files are never reported as duplicates.

    Fixes applied: the sketch was assigned to the clobbered name ``__magic_name__``
    while the loop updated the undefined ``min_hash``, and ``num_perm`` was wired
    to the token list instead of the ``NUM_PERM`` constant.
    '''
    if len(lowerCamelCase_ ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(lowerCamelCase_ ):
        min_hash.update(token.encode() )
    return min_hash
def __snake_case ( lowerCamelCase_ : str ):
    '''Return the set of non-empty tokens obtained by splitting the string on
    non-alphanumeric characters (see the module-level ``NON_ALPHA`` pattern).'''
    pieces = NON_ALPHA.split(lowerCamelCase_ )
    return {piece for piece in pieces if piece.strip()}
class UpperCamelCase_ :
    """MinHash/LSH index of near-duplicate files.

    Files whose estimated Jaccard similarity exceeds the threshold are grouped
    into clusters keyed by the first ("base") file seen for each cluster.

    Fixes applied: `__init__` never assigned the instance attributes it later
    reads, `add` repeated the parameter name `_lowerCamelCase` (a SyntaxError),
    and the three methods all shared the name `__A`, so `self.get_duplicate_clusters`
    and the caller's `di.add(...)` could not resolve.
    """

    def __init__( self , *,
        duplication_jaccard_threshold: float = 0.85 , ) -> None:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        # Maps a cluster's base key to the set of duplicate keys attached to it.
        self._duplicate_clusters = defaultdict(set )

    def add( self , code_key: Tuple , min_hash: "MinHash" ) -> None:
        """Index `code_key`, attaching it to an existing cluster when it is a near-duplicate."""
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                # No known cluster among the near-duplicates: start one at the first.
                self._duplicate_clusters[close_duplicates[0]].add(code_key )

    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        """Return the clusters as lists of ``{"base_index", "repo_name", "path"}`` dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters

    def save( self , filepath ) -> None:
        """Dump the duplicate clusters to `filepath` as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , "w" ) as f:
            json.dump(duplicate_clusters , f )
def __snake_case ( lowerCamelCase_ : List[Any] ):
    '''Worker: turn one ``(index, row)`` pair into ``((index, repo, path), MinHash)``.

    Returns ``None`` implicitly when the file is too short to hash.

    Fixes applied: the unpacking went to the clobbered ``__magic_name__`` while
    the body read the undefined ``element`` / ``data`` / ``min_hash`` names.
    '''
    index, data = lowerCamelCase_
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def __snake_case ( lowerCamelCase_ : Type[Dataset] ):
    '''Yield ``(key, MinHash)`` pairs for the dataset rows, computed by a process pool
    fed through a background-threaded iterator; rows that hash to ``None`` are skipped.'''
    with mp.Pool() as workers:
        feed = ThreadedIterator(lowerCamelCase_ , max_queue_size=10000 )
        for computed in workers.imap_unordered(_compute_min_hash , feed , chunksize=100 ):
            if computed is not None:
                yield computed
def __snake_case ( lowerCamelCase_ : Type[Dataset] , jaccard_threshold : float ):
    '''Build duplicate clusters for the whole dataset at the given Jaccard threshold.

    Fixes applied: both parameters shared the name ``lowerCamelCase_`` (a
    SyntaxError) and the index was assigned to the clobbered ``__magic_name__``
    while the loop called the undefined ``di``.
    '''
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCamelCase_ ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def __snake_case ( lowerCamelCase_ : str , code_b : str ):
    '''Jaccard similarity between the token sets of the two code strings.

    Fixes applied: both parameters shared the name ``lowerCamelCase_`` (a
    SyntaxError) and both token sets were assigned to the clobbered
    ``__magic_name__`` while the return line read the undefined ``tokensa``.
    '''
    tokens_a = get_tokens(lowerCamelCase_ )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
# Module-level handle so pool workers can read the dataset without pickling it
# into every task; rebound via `global _shared_dataset` in the extremes search
# below (the original clobbered this binding to `__magic_name__`).
_shared_dataset = None
def __snake_case ( lowerCamelCase_ : Dict , jaccard_threshold : float ):
    '''Reduce one duplicate cluster to its "extremes".

    Each file in the cluster is either attached to an already-kept extreme
    (incrementing that extreme's ``"copies"`` count) or kept as a new extreme.
    Reads file contents through the module-global ``_shared_dataset``.

    Fixes applied: both parameters shared the name ``lowerCamelCase_`` (a
    SyntaxError), the inner loop variable shadowed the outer one, and
    ``extremes`` was assigned to the clobbered ``__magic_name__``.
    '''
    extremes = []
    for element_a in lowerCamelCase_:
        code_a = _shared_dataset[element_a["base_index"]]["content"]
        for element_b in extremes:
            code_b = _shared_dataset[element_b["base_index"]]["content"]
            if jaccard_similarity(code_a , code_b ) >= jaccard_threshold:
                element_b["copies"] += 1
                break
        else:
            element_a["copies"] = 1
            extremes.append(element_a )
    return extremes
def __snake_case ( lowerCamelCase_ : Dict , dataset : Any , jaccard_threshold : Union[str, Any] ):
    '''Find the extremes of every duplicate cluster, in parallel.

    Publishes *dataset* through the module-global ``_shared_dataset`` so pool
    workers can read it without pickling it per task.

    Fixes applied: the three parameters all shared the name ``lowerCamelCase_``
    (a SyntaxError) and ``extremes_list`` / the partial were assigned to the
    clobbered ``__magic_name__``.
    '''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    reducer = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                reducer , lowerCamelCase_ , ) , total=len(lowerCamelCase_ ) , ):
            extremes_list.append(extremes )
    return extremes_list
def __snake_case ( lowerCamelCase_ : Type[Dataset] , jaccard_threshold : float = 0.85 ):
    '''Deduplicate the dataset: keep one "extreme" per duplicate cluster and drop the rest.

    Returns the filtered dataset together with the (annotated) duplicate clusters;
    each cluster element gains ``"is_extreme"`` and, when kept, its ``"copies"`` count.

    Fixes applied: both parameters (and the filter lambda's) shared a name — a
    SyntaxError — and every intermediate was assigned to the clobbered
    ``__magic_name__`` while the body read the undefined restored names.
    '''
    duplicate_clusters = make_duplicate_clusters(lowerCamelCase_ , jaccard_threshold )
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , lowerCamelCase_ , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = lowerCamelCase_.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(F'Original dataset size: {len(lowerCamelCase_ )}' )
    print(F'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(F'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(F'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(F'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
| 664 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Restored from the clobbered `UpperCamelCase_` bindings: the path to the tiny
# SentencePiece fixture and the two language-code token ids referenced by the
# integration tests below (`EN_CODE`, `PYTHON_CODE`).
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 5_00_03
PYTHON_CODE = 5_00_02
@require_sentencepiece
@require_tokenizers
class a_ (TokenizerTesterMixin , unittest.TestCase ):
    """Tests for the slow `PLBartTokenizer` on a tiny SentencePiece fixture.

    Fixes applied: the mixin base was the undefined name `_a` (now the imported
    `TokenizerTesterMixin`), the class attributes were all bound to
    `__lowerCAmelCase` (restored to the names the mixin reads), the three methods
    all shared the name `__UpperCamelCase` (shadowing), and the undefined
    `_lowerCamelCase` placeholders were rebound (`SAMPLE_VOCAB`, `True`, locals).
    """

    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes="""base""" , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )

    def test_full_base_tokenizer(self ):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes="""base""" , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ] , )
        end = tokenizer.vocab_size
        # The last 4 ids of the "base" vocab are the language codes + mask token.
        language_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
        self.assertListEqual(language_tokens , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
        code = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
        input_ids = tokenizer(code ).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids , skip_special_tokens=True , clean_up_tokenization_spaces=True ) , code , )

    def test_full_multi_tokenizer(self ):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes="""multi""" , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ] , )
        end = tokenizer.vocab_size
        # The "multi" vocab ends with 7 language code tokens.
        language_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 7 , end )]
        self.assertListEqual(
            language_tokens , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
        code = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
        input_ids = tokenizer(code ).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids , skip_special_tokens=True , clean_up_tokenization_spaces=True ) , code , )
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ (unittest.TestCase ):
    """Integration tests for the `uclanlp/plbart-python-en_XX` checkpoint.

    Fixes applied: the class attributes were all bound to `__lowerCAmelCase`
    (restored to the names the tests read), the methods all shared the name
    `__UpperCamelCase` (shadowing), `setUpClass` never assigned `cls.tokenizer`
    or `cls.pad_token_id`, and `batch["decoder_input_ids"]` was never set before
    being asserted on.
    """

    checkpoint_name = '''uclanlp/plbart-python-en_XX'''
    src_text = [
        '''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
        '''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
    ]
    tgt_text = [
        '''Returns the maximum value of a b c.''',
        '''Sums the values of a b c.''',
    ]
    # Tokenization of src_text[0], ending with EOS (2) and the __python__ code.
    expected_src_tokens = [
        1_3_4,
        5_4_5_2,
        3_3_4_6_0,
        3_3_4_4_1,
        3_3_4_6_3,
        3_3_4_6_5,
        3_3_4_6_3,
        3_3_4_4_9,
        9_8_8,
        2_0,
        3_3_4_5_6,
        1_9,
        3_3_4_5_6,
        7_7_1,
        3_9,
        4_2_5_8,
        8_8_9,
        3_3_1_8,
        3_3_4_4_1,
        3_3_4_6_3,
        3_3_4_6_5,
        3_3_4_6_3,
        3_3_4_4_9,
        2_4_7_1,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls ):
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self ):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_0_0_0_1 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_0_0_0_2 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_0_0_0_3 )

    def test_python_en_tokenizer_batch_encode_plus(self ):
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )

    def test_python_en_tokenizer_decode_ignores_language_codes(self ):
        self.assertIn(EN_CODE , self.tokenizer.all_special_ids )
        generated_ids = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_english = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_english )
        self.assertNotIn(self.tokenizer.eos_token , result )

    def test_python_en_tokenizer_truncation(self ):
        src_text = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 2_0]
        self.assertIsInstance(src_text[0] , str )
        desired_max_length = 1_0
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , PYTHON_CODE )
        self.assertEqual(len(ids ) , desired_max_length )

    def test_mask_token(self ):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_0_0_0_4, 5_0_0_0_1] )

    def test_special_tokens_unaffacted_by_save_load(self ):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )

    @require_torch
    def test_batch_fairseq_parity(self ):
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="""pt""" )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , EN_CODE )
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self ):
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 2_6) , batch.input_ids.shape )
        self.assertEqual((2, 2_6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )

    def test_seq2seq_max_length(self ):
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="""pt""" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=1_0 , return_tensors="""pt""" )
        labels = targets["""input_ids"""]
        batch["""decoder_input_ids"""] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )

    @require_torch
    def test_tokenizer_translation(self ):
        inputs = self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
        self.assertEqual(
            nested_simplify(inputs ) , {
                # A, test, EOS, en_XX
                """input_ids""": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
                """attention_mask""": [[1, 1, 1, 1]],
                # java
                """forced_bos_token_id""": 5_0_0_0_1,
            } , )
| 384 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# The conversion relies on private gluonnlp/mxnet internals, so pin the exact versions.
if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Sentence used to sanity-check that the converted model matches the original.
SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Convert the original (gluonnlp/mxnet) Bort checkpoint into a HuggingFace PyTorch checkpoint.

    Args:
        bort_checkpoint_path: path to the official Bort ``.params`` file.
        pytorch_dump_folder_path: output folder for the converted PyTorch model.
    """
    # Hyper-parameters of the released Bort checkpoint (4 layers, 8 heads).
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping (Gluonnlp -> Transformers), * denotes the layer index:
    #   encoder.layer_norm.{beta,gamma}            -> bert.embeddings.LayerNorm.{bias,weight}
    #   encoder.position_weight                    -> bert.embeddings.position_embeddings.weight
    #   word_embed.0.weight                        -> bert.embeddings.word_embeddings.weight
    #   encoder.transformer_cells.*.attention_cell.proj_{key,query,value} -> bert.encoder.layer.*.attention.self.{key,query,value}
    #   encoder.transformer_cells.*.proj           -> bert.encoder.layer.*.attention.output.dense
    #   encoder.transformer_cells.*.layer_norm     -> bert.encoder.layer.*.attention.output.LayerNorm
    #   encoder.transformer_cells.*.ffn.ffn_1      -> bert.encoder.layer.*.intermediate.dense
    #   encoder.transformer_cells.*.ffn.ffn_2      -> bert.encoder.layer.*.output.dense
    #   encoder.transformer_cells.*.ffn.layer_norm -> bert.encoder.layer.*.output.LayerNorm

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    # Kept local so this function is self-contained.
    sample_text = "The Nymphenburg Palace is a beautiful palace in Munich!"
    input_ids = tokenizer.encode_plus(sample_text)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(sample_text, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 664 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy import structure: maps each submodule to the public names it provides.
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 169 |
'''simple docstring'''
def __snake_case ( lowerCamelCase_ : int , lowerCamelCase_ : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
__magic_name__ = str(bin(lowerCamelCase_ ) )[2:] # remove the leading "0b"
__magic_name__ = str(bin(lowerCamelCase_ ) )[2:] # remove the leading "0b"
__magic_name__ = max(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
return "0b" + "".join(
str(int(char_a == "1" and char_b == "1" ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase_ ) , b_binary.zfill(lowerCamelCase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 664 | 0 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
# Custom pipe-delimited table format so tabulate output renders as a Markdown table in Slack.
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []  # [nodeid, duration, log-name prefix] rows for the log currently being parsed
group_info = []  # one [log name, num failed, failed rows] entry per *.log file
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
# Each *.log is a pytest report: one JSON object per line.
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    # Slack section blocks cap at ~3000 chars; truncate with a marker if needed.
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

# Only post to Slack when running inside CI (TEST_TYPE set).
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    # Post per-file failure details as threaded replies to the summary message.
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
| 461 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__magic_name__ : Tuple =threading.Lock()
__magic_name__ : Optional[logging.Handler] =None
__magic_name__ : List[str] ={
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
__magic_name__ : str =logging.WARNING
__magic_name__ : Any =True
def __snake_case ( ):
'''simple docstring'''
__magic_name__ = os.getenv("TRANSFORMERS_VERBOSITY" , lowerCamelCase_ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
F'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
def __snake_case ( ):
'''simple docstring'''
return __name__.split("." )[0]
def __snake_case ( ):
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def _configure_library_root_logger() -> None:
    """Install the default stderr handler on the library root logger (idempotent, thread-safe)."""
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    """Undo `_configure_library_root_logger`: remove the handler and reset the level."""
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    """Return the mapping of verbosity names to logging levels."""
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """
    Return a logger with the specified name. Defaults to the library root logger's name.
    """
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the effective verbosity level of the library root logger."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """Disable the default handler of the library root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library root logger."""
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    """Stop library log records from propagating to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Let library log records propagate to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Apply an explicit `[LEVEL|file:line] time >> message` format to every root-logger handler."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset every root-logger handler back to the default (no explicit) formatter."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, but silenced when the
    TRANSFORMERS_NO_ADVISORY_WARNINGS env var is set.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, but each distinct (logger, message) pair
    is emitted only once thanks to the lru_cache.
    """
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        # Mirror tqdm's first positional argument (the iterable), if given.
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function for any tqdm method (update, close, ...)."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    """Callable dispatcher: real tqdm when progress bars are active, EmptyTqdm otherwise."""

    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    """Disable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 664 | 0 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    """A tiny fully-connected network (input -> 4 -> 3 -> 1) trained by backpropagation."""

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # input layer -> first hidden layer (4 nodes).
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(self.input_array.shape[1], 4)

        # first hidden layer (4 nodes) -> second hidden layer (3 nodes).
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # second hidden layer (3 nodes) -> output layer (1 node).
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network; initially all zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate the stored input through both hidden layers and return the output activations."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # Activations connecting the first hidden layer with the second.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # Activations connecting the second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """Compute gradients of the squared error w.r.t. each weight matrix and apply them."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Run `iterations` feedforward/backprop cycles, optionally printing the MSE loss each time."""
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"""Iteration {iteration} Loss: {loss}""")

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Forward `input_arr` through the trained network and threshold the output at 0.6."""
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Element-wise logistic sigmoid activation: 1 / (1 + e^-x)."""
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of the sigmoid's output: s * (1 - s)."""
    return (value) * (1 - (value))
def example() -> int:
    """
    Train the network on the 3-bit parity-like truth table and return its
    prediction (0 or 1) for the input [1, 1, 1].
    """
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
"""Lazy import structure for the FocalNet model (config + optional PyTorch models)."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Configuration is always importable; modeling code only when torch is present.
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

# fairseq -> HF parameter-name mappings, one dict per sub-module.  Constant
# names restored from how the code below references them (MAPPING_S2T etc.).
MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
# Per-task mapping unions.
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
# Checkpoint entries that are never converted, plus per-task extensions.
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Write `value` into the parameter of `hf_pointer` addressed by the dotted `key`.

    :param hf_pointer: root HF module to descend into
    :param key: dotted attribute path (already mapped to HF naming)
    :param value: tensor from the fairseq checkpoint
    :param full_name: original fairseq name, used only for messages
    :param weight_type: which sub-tensor to set ("weight", "bias", ...), or None
    :raises ValueError: when the checkpoint tensor shape does not match the target
    """
    # Walk the dotted path down to the target module/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    """Return True when the fairseq parameter `name` matches any pattern in `ignore_keys`.

    Patterns ending in ".*" are prefix matches; patterns containing ".*." match
    when both the prefix and suffix occur in the name; anything else is a
    substring match.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    """Copy every tensor of the fairseq state dict into `hf_model`, using the per-task mapping.

    :param fairseq_dict: fairseq checkpoint's "model" state dict
    :param hf_model: target SpeechT5 model
    :param task: one of "s2t", "t2s", "s2s"
    :raises ValueError: on an unsupported task name
    """
    unused_weights = []

    # Select the mapping / ignore-list / feature encoder for the task.
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            # Conv feature-extractor layers have their own loader.
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one conv feature-extractor tensor into `feature_extractor`, or record it as unused.

    :param full_name: fairseq parameter name containing "conv_layers."
    :param value: checkpoint tensor
    :param feature_extractor: HF conv feature encoder
    :param unused_weights: list collecting names that could not be placed (mutated in place)
    :param use_group_norm: True when the model uses group norm on layer 0 only
    :raises ValueError: when a tensor's shape does not match its destination
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        # type 0: the convolution itself.
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2: the (layer/group) norm attached to the conv.
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None,
):
    """Convert a fairseq SpeechT5 checkpoint to the HF format and optionally push it to the hub.

    :param task: "s2t", "t2s" or "s2s" — selects the model class and mapping
    :param checkpoint_path: path to the fairseq checkpoint
    :param pytorch_dump_folder_path: output directory for the converted model/processor
    :param config_path: optional HF config.json to start from
    :param vocab_path: optional SentencePiece model for the tokenizer
    :param repo_id: optional hub repo to push to
    :raises ValueError: on an unknown task name
    """
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        # NOTE(review): the obfuscated source only shows the literals 1876 and
        # 600 being assigned; the attribute names below follow the upstream
        # conversion script — confirm against it.
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
"""Lazy import structure for the Longformer model (config, tokenizers, PT and TF models)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Config and the slow tokenizer are always importable; everything else is
# gated on optional dependencies below.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def UpperCAmelCase_(files):
    """Return the optimal (minimum) total cost of merging all `files` pairwise.

    Repeatedly merges the two smallest entries; the merged size is appended
    back and its cost accumulated (classic optimal merge pattern).
    NOTE: `files` is consumed (mutated) by this function.
    """
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
    # Run the embedded doctests when executed as a script.
    import doctest

    doctest.testmod()
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Map resampling-mode names to the PIL constants; the enum location moved in
# Pillow 9.1.0, so pick the right namespace per installed version.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
def pt_to_pil(images):
    """Convert a [-1, 1] torch image batch (N, C, H, W) to a list of PIL images."""
    # Undo the [-1, 1] normalization, move channels last, then hand off to
    # the numpy -> PIL converter.
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    """Convert a numpy image (H, W, C) or batch (N, H, W, C) in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        # Promote a single image to a batch of one.
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
"""simple docstring"""
import torch
from transformers import AutoModel
class lowerCAmelCase__(torch.nn.Module):
    """Few-shot NER model: a BERT encoder plus cosine-attention scoring of query
    tokens against support-set start/end tokens.

    Method and attribute names restored from the obfuscated bodies (which call
    self.BERT / self.cos / self.softmax / self.bert).
    """

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(lowerCAmelCase__, self).__init__()
        # Backbone encoder; return_dict so .last_hidden_state is available.
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        """Encode tokenized inputs and return the last hidden state."""
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        """Sum token embeddings over the hidden axis, keeping the dimension."""
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q, S, T=1):
        """Temperature-scaled softmax over cosine similarity of q against S."""
        return self.softmax(T * self.cos(q, S))

    def forward(self, W_query, W_supports):
        """Score each query token against support start/end tokens.

        `W_supports` carries bookkeeping entries ("sizes", "start_token_id",
        "end_token_id") that are popped before being fed to BERT.
        Returns (p_starts, p_ends) probability stacks.
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        # Remove bookkeeping keys so the rest is a plain BERT input dict.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            # Offset of this entity's support block within S.
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
# Module logger; the pipeline class below calls `logger.warning(...)`.
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class UpperCamelCase_(Pipeline):
    """Masked-language-modeling (fill-mask) pipeline.

    Method names restored to the Pipeline contract
    (preprocess/_forward/postprocess/_sanitize_parameters) from the obfuscated
    `__A` stubs.
    """

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        """Return the positions of the mask token in `input_ids`."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        """Raise a PipelineException when `input_ids` contains no mask token."""
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f'No mask_token ({self.tokenizer.mask_token}) found on the input',
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        """Validate every sample of `model_inputs` contains a mask token."""
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        """Tokenize `inputs` and check for mask tokens."""
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model; keep input_ids alongside the logits for postprocessing."""
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        """Turn logits into the top-k candidate fillings per mask position."""
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=False)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        """Resolve target words to vocabulary ids, tokenizing out-of-vocab targets."""
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f'The specified target token `{target}` does not exist in the model vocabulary. '
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f'The specified target token `{target}` does not exist in the model vocabulary. '
                    f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.'
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        """Route `targets`/`top_k` to postprocess; validate the tokenizer has a mask token."""
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        """Fill the mask(s); unwrap the batch dimension for single-item inputs."""
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Keras reference model class per variant; the obfuscated `EfficientNetBa`
# restored to the sequential B0..B7 classes matching the keys.
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

# Per-variant architecture hyper-parameters (referenced below as CONFIG_MAP).
CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}
def get_efficientnet_config(model_name):
    """Build an EfficientNetConfig for `model_name` ("b0".."b7") with ImageNet-1k labels."""
    config = EfficientNetConfig()
    # NOTE(review): config attribute names follow the upstream conversion
    # script; the obfuscated source only shows the CONFIG_MAP lookups.
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    # Attach the ImageNet-1k id<->label maps from the hub dataset.
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    """Download and return the standard COCO validation image used for sanity checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read directly from the HTTP response body
    # (the mangled code passed an undefined name and discarded the image).
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    """Create an ``EfficientNetImageProcessor`` matching the variant's input size."""
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        # The conversion script resizes manually, so center-cropping is disabled
        # (the mangled code passed an undefined name here).
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    """Map original TF parameter names to HF EfficientNet state-dict keys.

    Args:
        original_param_names: iterable of TF variable names (e.g. ``"block2a_bn/gamma:0"``).

    Returns:
        dict mapping each TF name present in the input to its ``efficientnet.*`` HF key,
        plus the classifier head keys.
    """
    # TF blocks are named "1a", "2a", "2b", ...; HF blocks are plain indices "0", "1", ...
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var"))
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight"))
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean"))
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var"))
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight"))
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var"))

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            # The backbone lives under the "efficientnet." prefix in the HF model.
            key_mapping[item[0]] = "efficientnet." + item[1]
    # Classifier head (NOTE(review): TF names reconstructed from the upstream
    # conversion script — the mangled code dropped the dict keys entirely).
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    """Copy TF weights into the HF state dict in place, transposing layouts as needed.

    Args:
        hf_params: HF ``state_dict`` (mutated in place via ``Tensor.copy_``).
        tf_params: dict of TF variable name -> numpy array.
        key_mapping: TF name -> HF key mapping (see ``rename_keys``).
    """
    # NOTE(review): the original signature repeated one parameter name three
    # times (a SyntaxError); names restored from the call site.
    for key, value in tf_params.items():
        if "normalization" in key:
            # Normalization layers have no counterpart in the HF state dict.
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # Conv kernels: permute(3, 2, 0, 1) — HWIO -> OIHW.
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            # Depthwise kernels use a different axis order than regular convs.
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            # Dense kernels: transpose (in, out) -> (out, in).
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters.
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Convert an original TF EfficientNet checkpoint to HF format and verify it.

    Loads the Keras reference model, copies its weights into an
    ``EfficientNetForImageClassification``, checks both models produce the same
    logits on a sample image, then optionally saves locally and/or pushes to the hub.

    NOTE(review): reconstructed from mangled code — the original signature
    repeated one parameter name four times (a SyntaxError) and bound every
    result to a throwaway local; names restored from their uses in the body.
    """
    # Load the original (Keras) model; `model_classes` is defined elsewhere in this file.
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    # Snapshot all original TF parameters by name (trainable and non-trainable).
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        repo_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(repo_name)
        hf_model.push_to_hub(repo_name)
if __name__ == "__main__":
    # Restore the `parser`/`args` bindings that the mangled code assigned to
    # throwaway names while still referencing `parser` and `args` below.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='b0',
        type=str,
        help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='hf_model',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--save_model', action='store_true', help='Save model to local')
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 311 |
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any ``k`` consecutive elements of ``array``.

    Uses a sliding window: seed with the first ``k`` elements, then slide one
    position at a time in O(n).

    Raises:
        ValueError: if ``k`` is negative or greater than ``len(array)``.
    """
    # NOTE(review): the original signature declared the same parameter name
    # twice (a SyntaxError); the body already used `array` and `k`, and the
    # __main__ block below calls `max_sum_in_array`.
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    current_sum = max_sum = sum(array[:k])
    for i in range(len(array) - k):
        # Drop the element leaving the window, add the one entering it.
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(current_sum, max_sum)
    return max_sum
if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    # Bind `array` and `k` (the mangled code assigned them to throwaway names
    # while the print below referenced them). Note: k may exceed len(array)
    # here, in which case max_sum_in_array deliberately raises ValueError.
    array = [randint(-10_00, 10_00) for i in range(1_00)]
    k = randint(0, 1_10)
    print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 664 | 0 |
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
# Backwards-compatibility shim: importing this module emits a deprecation
# warning directing users of the old flax Stable-Diffusion ControlNet import
# path to the new top-level `diffusers.FlaxStableDiffusionControlNetPipeline`.
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : int =logging.get_logger(__name__)
__magic_name__ : List[Any] ={}
class UpperCamelCase_ ( PretrainedConfig ):
    """Configuration class for LLaMA-style decoder models.

    NOTE(review): reconstructed from mangled code — instance attributes were
    assigned to throwaway locals while ``_rope_scaling_validation`` (called by
    name in ``__init__``) reads ``self.rope_scaling``; the base class was an
    unresolvable name and has been restored to the imported ``PretrainedConfig``.
    """

    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__(
        self,
        vocab_size=3_20_00,
        hidden_size=40_96,
        intermediate_size=1_10_08,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=20_48,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self) -> None:
        """Validate ``rope_scaling``: None, or a dict with a known "type" and a float "factor" > 1."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f'got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}')
| 664 | 0 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer test suite for XLM, backed by a tiny on-disk BPE vocab/merges fixture.

    NOTE(review): reconstructed from mangled code — all four methods shared one
    name (so only the last survived, and the ``setUp`` override was lost) and
    the fixture paths were never stored on ``self`` although later code reads
    ``self.vocab_file`` / ``self.merges_file``.
    """

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and id conversion against the fixture vocab."""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """Special-token layout for single sequences and sequence pairs."""
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 297 |
'''Ideal-gas-law helpers (PV = nRT).'''
# Bind the constant under the name the functions below actually read —
# the mangled code assigned it to a throwaway identifier.
UNIVERSAL_GAS_CONSTANT: float = 8.3_1_4_4_6_2  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure of an ideal gas: P = nRT / V.

    Raises:
        ValueError: if any input is negative.
    """
    # NOTE(review): the original signature declared one parameter name three
    # times (a SyntaxError) and collided with the volume helper below; names
    # restored from the body and the upstream ideal-gas-law module.
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume of an ideal gas: V = nRT / P.

    Raises:
        ValueError: if any input is negative.
    """
    # NOTE(review): same duplicate-parameter SyntaxError / name collision as
    # the pressure helper above; names restored from the body.
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 664 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class _lowercase ( TestCase ):
    """Unit tests for ``RealmRetriever`` using a tiny on-disk tokenizer fixture
    and an in-memory array of dummy evidence blocks.

    NOTE(review): reconstructed from mangled code — every method shared one
    name (so ``setUp``/``tearDown`` overrides were lost), instance attributes
    were assigned to throwaways although later code reads ``self.tmpdirname``
    and ``self.num_block_records``, and the base class name was unresolvable
    (restored to the imported ``TestCase``).
    """

    def setUp(self) -> None:
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'test',
            'question',
            'this',
            'is',
            'the',
            'first',
            'second',
            'third',
            'fourth',
            'fifth',
            'record',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, 'realm_tokenizer')
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, 'realm_block_records')
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'realm_tokenizer'))

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'question': ['foo', 'bar'],
                'answers': [['Foo', 'Bar'], ['Bar']],
            })
        return dataset

    def get_dummy_block_records(self):
        # dtype=object keeps the raw bytes records intact in the numpy array.
        block_records = np.array(
            [
                b'This is the first record',
                b'This is the second record',
                b'This is the third record',
                b'This is the fourth record',
                b'This is the fifth record',
                b'This is a longer longer longer record',
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        """Retriever concatenates question and evidence blocks into reader inputs."""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth'],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='np')
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 1_0))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 1_0))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 1_0))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 1_0))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'],
        )

    def test_block_has_answer(self):
        """Answer-span detection over retrieved blocks."""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth', 'longer longer'],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='np')
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        """Round-trip block records via save_pretrained/from_pretrained, local and mocked-remote."""
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, 'realm_block_records'))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, 'realm_block_records'))
        self.assertEqual(retriever.block_records[0], b'This is the first record')

        # Test mocked remote path
        with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, 'realm_block_records'), _REALM_BLOCK_RECORDS_FILENAME)
            retriever = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')
        self.assertEqual(retriever.block_records[0], b'This is the first record')
| 417 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__magic_name__ : List[Any] =logging.getLogger(__name__)
class UpperCamelCase_ ( TokenClassificationTask ):
    """CoNLL-style NER task: one token per line, with the tag in column ``label_idx``.

    NOTE(review): reconstructed from mangled code — the three methods all
    shared the name ``__A`` (so only one survived) and ``self.label_idx`` was
    never assigned although ``read_examples_from_file`` reads it; the base
    class is restored to the imported ``TokenClassificationTask``.
    """

    def __init__(self, label_idx: int = -1) -> None:
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir: str, mode) -> List[InputExample]:
        """Parse ``{mode}.txt`` into ``InputExample``s, splitting on -DOCSTART-/blank lines."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        """Echo the test file, replacing each token's label column with the prediction."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        """Read labels from ``path`` (ensuring "O" is present) or return the CoNLL defaults."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase_ ( UpperCamelCase_ ):
    """CoNLL-2003 chunking task: the chunk label is the second-to-last column.

    NOTE(review): the base resolves to the NER task class defined immediately
    above (the mangled base name ``A`` was unresolvable); the ``super().__init__``
    call grounds that relationship.
    """

    def __init__(self) -> None:
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        """Read labels from ``path`` (ensuring "O" is present) or return the chunking defaults."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class UpperCamelCase_ ( TokenClassificationTask ):
    """Part-of-speech tagging task over CoNLL-U files (via ``conllu.parse_incr``).

    NOTE(review): reconstructed from mangled code — the three methods all
    shared the name ``__A``; names and locals restored from their uses and
    the imported ``TokenClassificationTask`` base.
    """

    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        """Parse ``{mode}.txt`` (CoNLL-U) into examples of (form, upos) sequences."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f'{mode}.txt')
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        """Write one line per sentence: ``form (upos|prediction)`` for each token."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        """Read labels from ``path`` or return the universal POS tag set."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 664 | 0 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
    """Solve the shear-stress relation (tau = F / A) for whichever quantity is zero.

    Exactly one of the three arguments must be 0; that quantity is computed
    from the other two and returned as a ``(name, value)`` tuple.

    Raises:
        ValueError: if the number of zero arguments is not exactly one, or if
            any argument is negative.
    """
    # NOTE(review): the original signature declared one parameter name three
    # times (a SyntaxError); names restored from the body's own references.
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("""You cannot supply more or less than 2 values""")
    elif stress < 0:
        raise ValueError("""Stress cannot be negative""")
    elif tangential_force < 0:
        raise ValueError("""Tangential Force cannot be negative""")
    elif area < 0:
        raise ValueError("""Area cannot be negative""")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 384 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCamelCase_ :
    """A dense row-major matrix supporting +, -, unary -, scalar/matrix *,
    transpose, and the Sherman-Morrison rank-1 inverse update.

    NOTE(review): reconstructed from mangled code — ``self.row``/``self.column``/
    ``self.array`` were never assigned, and the three ``__A`` methods are renamed
    to ``validate_indicies``/``transpose``/``sherman_morrison``, the names the
    class's own code (and the __main__ tests below) already call.
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        # Stored row-major: array[r][c].
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f'Matrix consist of {self.row} rows and {self.column} columns\n'
        # Right-pad every element to the width of the widest one.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'%{max_element_length}s'

        def single_line(row_vector: list[float]) -> str:
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """True iff ``loc`` is a 2-element list/tuple inside the matrix bounds."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, UpperCamelCase_)
        assert self.row == another.row and self.column == another.column
        result = UpperCamelCase_(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = UpperCamelCase_(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = UpperCamelCase_(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, UpperCamelCase_):  # Matrix multiplication
            assert self.column == another.row
            result = UpperCamelCase_(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'Unsupported type given for another ({type(another)})'
            raise TypeError(msg)

    def transpose(self):
        result = UpperCamelCase_(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Apply the Sherman-Morrison formula: given self = A^-1, return (A + u v^T)^-1.

        Returns None when the update is not invertible (1 + v^T A^-1 u == 0).
        """
        assert isinstance(u, UpperCamelCase_) and isinstance(v, UpperCamelCase_)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Restore the public name the __main__ tests below already use.
Matrix = UpperCamelCase_
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Smoke-test the Sherman-Morrison update on a 3x3 identity (prints results)."""
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(F'a^(-1) is {ainv}')
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F'u is {u}')
        print(F'v is {v}')
        print(F'uv^T is {u * v.transpose()}')
        # Sherman Morrison
        print(F'(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}')

    def test2() -> None:
        import doctest

        doctest.testmod()

    # NOTE(review): the mangled code called an undefined name; both helper
    # functions also shared one name. Names restored; run the doctest entry.
    test2()
| 664 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for OpenAIGPTTokenizer / OpenAIGPTTokenizerFast.

    NOTE(review): this class previously inherited from an undefined ``_a``
    and bound every mixin attribute to the same ``_a`` name; restored the
    attribute names the TokenizerTesterMixin contract expects.
    """

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the mixin's tmp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        # Same text for input and expected output: round-trip is lossless here.
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # This tokenizer has no pad token, so every max_length padding
                # request below must raise.
                # NOTE(review): the expected exception was mangled to
                # `_lowerCamelCase`; ValueError matches the sibling
                # tokenizer tests — confirm against the mixin.
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    def test_padding_different_model_input_name(self):
        # Not applicable: this tokenizer has no padding token.
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(SCREAMING_SNAKE_CASE__):
    """Re-run the tokenizer suite with the ftfy/spacy-backed normalizer.

    NOTE(review): this class was previously also named
    ``SCREAMING_SNAKE_CASE__`` (shadowing the test class above) and
    inherited from an undefined ``_a``; renamed it and pointed it at the
    concrete test class so both suites are collected.
    """

    pass
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Sample text with non-ASCII characters, used to sanity-check tokenization.
SAMPLE_TEXT = "Hello world! cécé herlolip"

# Configuration record mirroring the authors' BertAbs hyper-parameters.
# NOTE(review): this was previously bound to a mangled name while the
# conversion function below constructs `BertAbsConfig(...)`; restored the
# referenced name.
BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)
def convert_bertabs_checkpoints(path_to_checkpoints: str, pytorch_dump_folder_path: str):
    """Convert the authors' BertAbs checkpoint to a transformers state dict.

    Loads the original AbsSummarizer weights, copies them into a
    BertAbsSummarizer, verifies both stacks produce identical outputs, and
    saves the new state dict.

    Args:
        path_to_checkpoints: path to the official PyTorch dump.
        pytorch_dump_folder_path: output folder (currently unused; the state
            dict is written to a hard-coded path below).

    Raises:
        ValueError: if the two models' generator outputs differ by more than 1e-3.

    NOTE(review): the function was previously named ``__snake_case`` while
    the __main__ block calls ``convert_bertabs_checkpoints``, and both
    parameters shared one mangled name (a SyntaxError); restored the
    referenced names.
    """
    # Instantiate the authors' model with the pre-trained weights.
    # NOTE(review): the boolean flags were mangled in the source — these
    # values (finetune_bert/large/use_bert_emb False, share_emb True) should
    # be confirmed against the original conversion script.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    # NOTE(review): AbsSummarizer(args, device, checkpoint) per the authors'
    # model_builder — confirm the argument order against the PreSumm repo.
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_generator, output_original_generator, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    # NOTE(review): `parser` and `args` were previously bound to a mangled
    # name, so the add_argument/parse_args calls referenced undefined names;
    # restored the conventional argparse wiring.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __lowercase(PipelineTool):
    """Agent tool that generates an English caption for an image with BLIP.

    NOTE(review): the class previously inherited from an undefined
    ``__snake_case``, every class attribute was bound to ``_A``, and the
    three pipeline hooks all shared the name ``_a`` (so only the last
    survived class creation); restored the attribute/method names the
    PipelineTool base class expects — confirm against transformers' tools API.
    """

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # The pre-processor needs Pillow; fail fast if vision extras are absent.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        # Convert the PIL image to model-ready tensors.
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        # Take the first (only) generated sequence and strip whitespace.
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase_(unittest.TestCase):
    """Unit tests for DisjunctiveConstraint.

    NOTE(review): all four test methods were previously named ``__A``, so
    only the last definition survived class creation; restored distinct
    unittest-discoverable names. The expected exception type was mangled —
    ValueError matches DisjunctiveConstraint's validation; confirm.
    """

    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(stepped is True and completed is False and reset is False)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(stepped is True and completed is False and reset is False)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        self.assertTrue(stepped is True and completed is True and reset is False)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
# Adjacency list of the DAG and the list of its vertices.
# NOTE(review): both values were previously bound to the same mangled name
# while topological_sort reads `edges` and `vertices`; restored those names.
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]
def topological_sort(start, visited, sort):
    """Depth-first topological sort of the module-level ``edges`` DAG.

    Args:
        start: vertex to begin the traversal from.
        visited: accumulator list of vertices already visited (mutated in place).
        sort: accumulator list holding the ordering built so far (mutated in place).

    Returns:
        The list of vertices in traversal-finish order.

    NOTE(review): the function was previously named ``a`` with three
    parameters all named ``a`` (a SyntaxError) while the __main__ block
    calls ``topological_sort``; restored the referenced names.
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    # NOTE(review): the result was previously bound to a mangled name while
    # the print referenced an undefined `sort`; bind and print one name.
    result = topological_sort("a", [], [])
    print(result)
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
# Every PretrainedConfig kwarg paired with a deliberately non-default value;
# the ConfigTestUtils test below checks this stays in sync with PretrainedConfig.
# NOTE(review): this dict was previously bound to a mangled name while the
# tests read `config_common_kwargs`; restored the referenced name.
config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}
@is_staging_test
class UpperCamelCase_(unittest.TestCase):
    """Round-trip tests for pushing configs to the (staging) hub.

    NOTE(review): the class/teardown fixtures and all test methods were
    previously named ``__A``, so unittest never ran the fixtures and only
    one method survived class creation; restored the lifecycle/test names.
    """

    @classmethod
    def setUpClass(cls):
        # Authenticate against the hub once for the whole class.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of any repos the tests created.
        for repo_id in ("test-config", "valid_org/test-config-org", "test-dynamic-config"):
            try:
                delete_repo(token=cls._token, repo_id=repo_id)
            except HTTPError:
                pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    """Offline/utility tests for PretrainedConfig behavior.

    NOTE(review): this class previously reused the name of the staging test
    class above (shadowing it) and all methods were named ``__A``; renamed
    the class and restored distinct test names.
    """

    def test_config_from_string(self):
        c = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to addin config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        # NOTE(review): the expected exception type was mangled; OSError is
        # what from_pretrained raises for a missing config — confirm.
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")
        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` ia not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
# NOTE(review): trailing non-code text (dataset-site boilerplate) left over
# from extraction; commented out so the module remains parseable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.